8210119: Rename SubTasksDone::is_task_claimed
author kbarrett
Thu, 30 Aug 2018 16:16:19 -0400
changeset 51598 c88019b32bc4
parent 51597 4c78f4fd8370
child 51599 3198179d97fa
8210119: Rename SubTasksDone::is_task_claimed
Summary: Renamed to try_claim_task and inverted result.
Reviewed-by: coleenp, sjohanss
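
Not part of the changeset itself: a minimal sketch of what the rename and inverted result mean for a call site, mirroring the pattern in the diff below. The names pst, EXAMPLE_TASK, and do_the_work() are hypothetical placeholders, not identifiers from the patch.

    // Before: is_task_claimed() returned true when the task was ALREADY claimed
    // by some thread, so callers negated the result to decide whether to do work.
    if (!pst->is_task_claimed(EXAMPLE_TASK)) {
      do_the_work();  // this thread won the claim
    }

    // After: try_claim_task() returns true when THIS thread successfully claims
    // the task, so the negation disappears and the condition reads naturally.
    if (pst->try_claim_task(EXAMPLE_TASK)) {
      do_the_work();  // this thread won the claim
    }
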
src/hotspot/share/gc/cms/cmsCardTable.cpp
src/hotspot/share/gc/cms/cmsHeap.cpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
src/hotspot/share/gc/g1/g1RootProcessor.cpp
src/hotspot/share/gc/shared/genCollectedHeap.cpp
src/hotspot/share/gc/shared/preservedMarks.cpp
src/hotspot/share/gc/shared/weakProcessor.inline.hpp
src/hotspot/share/gc/shared/workgroup.cpp
src/hotspot/share/gc/shared/workgroup.hpp
src/hotspot/share/runtime/safepoint.cpp
--- a/src/hotspot/share/gc/cms/cmsCardTable.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsCardTable.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -79,7 +79,7 @@
   pst->set_n_tasks(n_strides);
 
   uint stride = 0;
-  while (!pst->is_task_claimed(/* reference */ stride)) {
+  while (pst->try_claim_task(/* reference */ stride)) {
     process_stride(sp, mr, stride, n_strides,
                    cl, ct,
                    lowest_non_clean,
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -231,7 +231,7 @@
   }
 
   if (young_gen_as_roots &&
-      !_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+      _process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
     root_closure->set_generation(young_gen());
     young_gen()->oop_iterate(root_closure);
     root_closure->reset_generation();
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -3225,7 +3225,7 @@
   }
 
   size_t chunk_size = sp->marking_task_size();
-  while (!pst->is_task_claimed(/* reference */ nth_task)) {
+  while (pst->try_claim_task(/* reference */ nth_task)) {
     // Having claimed the nth task in this space,
     // compute the chunk that it corresponds to:
     MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
@@ -4494,7 +4494,7 @@
   if (n_tasks > 0) {
     assert(pst->valid(), "Uninitialized use?");
     HeapWord *start, *end;
-    while (!pst->is_task_claimed(/* reference */ nth_task)) {
+    while (pst->try_claim_task(/* reference */ nth_task)) {
       // We claimed task # nth_task; compute its boundaries.
       if (chunk_top == 0) {  // no samples were taken
         assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
@@ -4580,7 +4580,7 @@
   assert(is_aligned(start_addr, alignment), "Check alignment");
   assert(is_aligned(chunk_size, alignment), "Check alignment");
 
-  while (!pst->is_task_claimed(/* reference */ nth_task)) {
+  while (pst->try_claim_task(/* reference */ nth_task)) {
     // Having claimed the nth_task, compute corresponding mem-region,
     // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
     // The alignment restriction ensures that we do not need any
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -95,7 +95,7 @@
   {
     // Now the CM ref_processor roots.
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_refProcessor_oops_do)) {
       // We need to treat the discovered reference lists of the
       // concurrent mark ref processor as roots and keep entries
       // (which are added by the marking threads) on them live
@@ -127,7 +127,7 @@
   // as implicitly live).
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_filter_satb_buffers) && _g1h->collector_state()->mark_or_rebuild_in_progress()) {
       G1BarrierSet::satb_mark_queue_set().filter_thread_buffers();
     }
   }
@@ -224,7 +224,7 @@
   // let the thread process the weak CLDs and nmethods.
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
       ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
     }
   }
@@ -245,35 +245,35 @@
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_Universe_oops_do)) {
       Universe::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_JNIHandles_oops_do)) {
       JNIHandles::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_ObjectSynchronizer_oops_do)) {
       ObjectSynchronizer::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
       Management::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_jvmti_oops_do)) {
       JvmtiExport::oops_do(strong_roots);
     }
   }
@@ -281,7 +281,7 @@
 #if INCLUDE_AOT
   if (UseAOT) {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_aot_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
         AOTLoader::oops_do(strong_roots);
     }
   }
@@ -289,7 +289,7 @@
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
+    if (_process_strong_tasks.try_claim_task(G1RP_PS_SystemDictionary_oops_do)) {
       SystemDictionary::oops_do(strong_roots);
     }
   }
@@ -308,7 +308,7 @@
 void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                                G1GCPhaseTimes* phase_times,
                                                uint worker_i) {
-  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
+  if (_process_strong_tasks.try_claim_task(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(code_closure);
   }
 }
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -792,7 +792,7 @@
   // could be trying to change the termination condition while the task
   // is executing in another GC worker.
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ClassLoaderDataGraph_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_ClassLoaderDataGraph_oops_do)) {
     ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
   }
 
@@ -802,32 +802,32 @@
   bool is_par = scope->n_threads() > 1;
   Threads::possibly_parallel_oops_do(is_par, strong_roots, roots_from_code_p);
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Universe_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_Universe_oops_do)) {
     Universe::oops_do(strong_roots);
   }
   // Global (strong) JNI handles
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_JNIHandles_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_JNIHandles_oops_do)) {
     JNIHandles::oops_do(strong_roots);
   }
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_ObjectSynchronizer_oops_do)) {
     ObjectSynchronizer::oops_do(strong_roots);
   }
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_Management_oops_do)) {
     Management::oops_do(strong_roots);
   }
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_jvmti_oops_do)) {
     JvmtiExport::oops_do(strong_roots);
   }
-  if (UseAOT && !_process_strong_tasks->is_task_claimed(GCH_PS_aot_oops_do)) {
+  if (UseAOT && _process_strong_tasks->try_claim_task(GCH_PS_aot_oops_do)) {
     AOTLoader::oops_do(strong_roots);
   }
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_SystemDictionary_oops_do)) {
     SystemDictionary::oops_do(strong_roots);
   }
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_CodeCache_oops_do)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_CodeCache_oops_do)) {
     if (so & SO_ScavengeCodeCache) {
       assert(code_roots != NULL, "must supply closure for code cache");
 
@@ -876,7 +876,7 @@
                 cld_closure, cld_closure, &mark_code_closure);
   process_string_table_roots(scope, root_closure, par_state_string);
 
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
+  if (_process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
     root_closure->reset_generation();
   }
 
--- a/src/hotspot/share/gc/shared/preservedMarks.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/shared/preservedMarks.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -100,7 +100,7 @@
 public:
   virtual void work(uint worker_id) {
     uint task_id = 0;
-    while (!_sub_tasks.is_task_claimed(/* reference */ task_id)) {
+    while (_sub_tasks.try_claim_task(/* reference */ task_id)) {
       _preserved_marks_set->get(task_id)->restore_and_increment(_total_size_addr);
     }
     _sub_tasks.all_tasks_completed();
--- a/src/hotspot/share/gc/shared/weakProcessor.inline.hpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/shared/weakProcessor.inline.hpp	Thu Aug 30 16:16:19 2018 -0400
@@ -47,7 +47,7 @@
   FOR_EACH_WEAK_PROCESSOR_PHASE(phase) {
     if (WeakProcessorPhases::is_serial(phase)) {
       uint serial_index = WeakProcessorPhases::serial_index(phase);
-      if (!_serial_phases_done.is_task_claimed(serial_index)) {
+      if (_serial_phases_done.try_claim_task(serial_index)) {
         WeakProcessorPhaseTimeTracker pt(_phase_times, phase);
         WeakProcessorPhases::processor(phase)(is_alive, keep_alive);
       }
--- a/src/hotspot/share/gc/shared/workgroup.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/shared/workgroup.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -429,16 +429,16 @@
 #endif
 }
 
-bool SubTasksDone::is_task_claimed(uint t) {
+bool SubTasksDone::try_claim_task(uint t) {
   assert(t < _n_tasks, "bad task id.");
   uint old = _tasks[t];
   if (old == 0) {
     old = Atomic::cmpxchg(1u, &_tasks[t], 0u);
   }
   assert(_tasks[t] == 1, "What else?");
-  bool res = old != 0;
+  bool res = old == 0;
 #ifdef ASSERT
-  if (!res) {
+  if (res) {
     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
     Atomic::inc(&_claimed);
   }
@@ -476,16 +476,16 @@
   return _n_threads > 0;
 }
 
-bool SequentialSubTasksDone::is_task_claimed(uint& t) {
+bool SequentialSubTasksDone::try_claim_task(uint& t) {
   t = _n_claimed;
   while (t < _n_tasks) {
     uint res = Atomic::cmpxchg(t+1, &_n_claimed, t);
     if (res == t) {
-      return false;
+      return true;
     }
     t = res;
   }
-  return true;
+  return false;
 }
 
 bool SequentialSubTasksDone::all_tasks_completed() {
--- a/src/hotspot/share/gc/shared/workgroup.hpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/gc/shared/workgroup.hpp	Thu Aug 30 16:16:19 2018 -0400
@@ -332,9 +332,10 @@
   // True iff the object is in a valid state.
   bool valid();
 
-  // Returns "false" if the task "t" is unclaimed, and ensures that task is
-  // claimed.  The task "t" is required to be within the range of "this".
-  bool is_task_claimed(uint t);
+  // Attempt to claim the task "t", returning true if successful,
+  // false if it has already been claimed.  The task "t" is required
+  // to be within the range of "this".
+  bool try_claim_task(uint t);
 
   // The calling thread asserts that it has attempted to claim all the
   // tasks that it will try to claim.  Every thread in the parallel task
@@ -391,11 +392,11 @@
   // agree on the number of tasks.
   void set_n_tasks(uint t) { _n_tasks = t; }
 
-  // Returns false if the next task in the sequence is unclaimed,
-  // and ensures that it is claimed. Will set t to be the index
-  // of the claimed task in the sequence. Will return true if
-  // the task cannot be claimed and there are none left to claim.
-  bool is_task_claimed(uint& t);
+  // Attempt to claim the next unclaimed task in the sequence,
+  // returning true if successful, with t set to the index of the
+  // claimed task.  Returns false if there are no more unclaimed tasks
+  // in the sequence.
+  bool try_claim_task(uint& t);
 
   // The calling thread asserts that it has attempted to claim
   // all the tasks it possibly can in the sequence. Every thread
--- a/src/hotspot/share/runtime/safepoint.cpp	Thu Aug 30 12:39:26 2018 -0700
+++ b/src/hotspot/share/runtime/safepoint.cpp	Thu Aug 30 16:16:19 2018 -0400
@@ -628,7 +628,7 @@
     // All threads deflate monitors and mark nmethods (if necessary).
     Threads::possibly_parallel_threads_do(true, &_cleanup_threads_cl);
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_DEFLATE_MONITORS)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_DEFLATE_MONITORS)) {
       const char* name = "deflating idle monitors";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
@@ -638,7 +638,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_UPDATE_INLINE_CACHES)) {
       const char* name = "updating inline caches";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
@@ -648,7 +648,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_COMPILATION_POLICY)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_COMPILATION_POLICY)) {
       const char* name = "compilation policy safepoint handler";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));
@@ -658,7 +658,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH)) {
       if (SymbolTable::needs_rehashing()) {
         const char* name = "rehashing symbol table";
         EventSafepointCleanupTask event;
@@ -670,7 +670,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_STRING_TABLE_REHASH)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_STRING_TABLE_REHASH)) {
       if (StringTable::needs_rehashing()) {
         const char* name = "rehashing string table";
         EventSafepointCleanupTask event;
@@ -682,7 +682,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_CLD_PURGE)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_CLD_PURGE)) {
       // CMS delays purging the CLDG until the beginning of the next safepoint and to
       // make sure concurrent sweep is done
       const char* name = "purging class loader data graph";
@@ -694,7 +694,7 @@
       }
     }
 
-    if (!_subtasks.is_task_claimed(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) {
+    if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE)) {
       const char* name = "resizing system dictionaries";
       EventSafepointCleanupTask event;
       TraceTime timer(name, TRACETIME_LOG(Info, safepoint, cleanup));