8231189: Rename worker_i parameters to worker_id

author:    tschatzl
date:      Mon, 23 Sep 2019 11:37:08 +0200
changeset: 58264:4e96939a5746
parent:    58263:4fbc534fdf69
child:     58265:577e17cab93f

description:
8231189: Rename worker_i parameters to worker_id
Reviewed-by: kbarrett, sjohanss

files:
src/hotspot/share/gc/g1/g1CollectedHeap.cpp
src/hotspot/share/gc/g1/g1CollectedHeap.hpp
src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp
src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp
src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp
src/hotspot/share/gc/g1/g1HotCardCache.cpp
src/hotspot/share/gc/g1/g1HotCardCache.hpp
src/hotspot/share/gc/g1/g1OopClosures.hpp
src/hotspot/share/gc/g1/g1OopClosures.inline.hpp
src/hotspot/share/gc/g1/g1RemSet.cpp
src/hotspot/share/gc/g1/g1RemSet.hpp
src/hotspot/share/gc/g1/g1RootProcessor.cpp
src/hotspot/share/gc/g1/g1RootProcessor.hpp
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Sep 23 11:37:08 2019 +0200
@@ -132,7 +132,7 @@
   RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
     _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
 
-  void do_card_ptr(CardValue* card_ptr, uint worker_i) {
+  void do_card_ptr(CardValue* card_ptr, uint worker_id) {
     HeapRegion* hr = region_for_card(card_ptr);
 
     // Should only dirty cards in regions that won't be freed.
@@ -1938,8 +1938,8 @@
   return _hrm->total_free_bytes();
 }
 
-void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
-  _hot_card_cache->drain(cl, worker_i);
+void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id) {
+  _hot_card_cache->drain(cl, worker_id);
 }
 
 // Computes the sum of the storage used by the various regions.
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -991,7 +991,7 @@
   void scrub_rem_set();
 
   // Apply the given closure on all cards in the Hot Card Cache, emptying it.
-  void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i);
+  void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);
 
   // The shared block offset table array.
   G1BlockOffsetTable* bot() const { return _bot; }
--- a/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp	Mon Sep 23 11:37:08 2019 +0200
@@ -198,10 +198,10 @@
 
 static Thresholds calc_thresholds(size_t green_zone,
                                   size_t yellow_zone,
-                                  uint worker_i) {
+                                  uint worker_id) {
   double yellow_size = yellow_zone - green_zone;
   double step = yellow_size / G1ConcurrentRefine::max_num_threads();
-  if (worker_i == 0) {
+  if (worker_id == 0) {
     // Potentially activate worker 0 more aggressively, to keep
     // available buffers near green_zone value.  When yellow_size is
     // large we don't want to allow a full step to accumulate before
@@ -209,8 +209,8 @@
     // than green_zone buffers to be processed during scanning.
     step = MIN2(step, ParallelGCThreads / 2.0);
   }
-  size_t activate_offset = static_cast<size_t>(ceil(step * (worker_i + 1)));
-  size_t deactivate_offset = static_cast<size_t>(floor(step * worker_i));
+  size_t activate_offset = static_cast<size_t>(ceil(step * (worker_id + 1)));
+  size_t deactivate_offset = static_cast<size_t>(floor(step * worker_id));
   return Thresholds(green_zone + activate_offset,
                     green_zone + deactivate_offset);
 }
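To make the threshold arithmetic above concrete, here is a standalone sketch of the same computation with invented inputs (green_zone = 100, yellow_zone = 500, eight refinement threads, ParallelGCThreads = 8); real values come from G1's zone sizing, not from this example:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t green_zone  = 100;          // hypothetical zone values
  const size_t yellow_zone = 500;
  const double max_threads = 8.0;          // stand-in for G1ConcurrentRefine::max_num_threads()
  const double parallel_gc_threads = 8.0;  // stand-in for ParallelGCThreads

  const double yellow_size = (double)(yellow_zone - green_zone);  // 400
  for (unsigned worker_id = 0; worker_id < 4; worker_id++) {
    double step = yellow_size / max_threads;                      // 50 buffers per worker
    if (worker_id == 0) {
      // Same special case as in the hunk above: wake worker 0 earlier.
      step = std::min(step, parallel_gc_threads / 2.0);           // 4 buffers
    }
    size_t activate   = green_zone + (size_t)ceil(step * (worker_id + 1));
    size_t deactivate = green_zone + (size_t)floor(step * worker_id);
    printf("worker %u: activate at %zu buffers, deactivate at %zu\n",
           worker_id, activate, deactivate);
  }
  return 0;
}

With these inputs worker 0 activates at 104 buffers and deactivates at 100, while workers 1-3 activate at 200, 250 and 300, so the first worker hovers near green_zone and the rest spread out across the yellow zone.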
--- a/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1DirtyCardQueue.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -158,7 +158,7 @@
   // Stops processing a buffer if SuspendibleThreadSet::should_yield(),
   // returning the incompletely processed buffer to the completed buffer
   // list, for later processing of the remainder.
-  bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);
+  bool refine_completed_buffer_concurrently(uint worker_id, size_t stop_at);
 
   // If a full collection is happening, reset partial logs, and release
   // completed ones: the full collection will make them all irrelevant.
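The comment above describes a stop-and-requeue contract: processing halts at a yield request and the partially processed buffer goes back on the completed list. A rough sketch of that shape, using invented stand-in types rather than the real BufferNode/queue machinery (and omitting the stop_at threshold):

#include <atomic>
#include <cstddef>
#include <deque>

struct Buffer {                                  // invented stand-in for a card buffer
  size_t index = 0;                              // next unprocessed entry
  size_t size  = 0;
  void*  cards[256];
};

static std::deque<Buffer*> completed_buffers;    // invented stand-in for the queue set
static std::atomic<bool>   yield_requested{false};

static void refine_card(void*, unsigned) { /* refine one card */ }

// Returns true if the buffer was fully processed, false if it was requeued.
bool refine_buffer(Buffer* buf, unsigned worker_id) {
  for (size_t i = buf->index; i < buf->size; i++) {
    if (yield_requested.load()) {                // SuspendibleThreadSet::should_yield()
      buf->index = i;                            // remember where we stopped
      completed_buffers.push_back(buf);          // return the remainder for later
      return false;
    }
    refine_card(buf->cards[i], worker_id);
  }
  return true;
}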
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Mon Sep 23 11:37:08 2019 +0200
@@ -272,37 +272,37 @@
 #undef ASSERT_PHASE_UNINITIALIZED
 
 // record the time a phase took in seconds
-void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_i, double secs) {
-  _gc_par_phases[phase]->set(worker_i, secs);
+void G1GCPhaseTimes::record_time_secs(GCParPhases phase, uint worker_id, double secs) {
+  _gc_par_phases[phase]->set(worker_id, secs);
 }
 
 // add a number of seconds to a phase
-void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_i, double secs) {
-  _gc_par_phases[phase]->add(worker_i, secs);
+void G1GCPhaseTimes::add_time_secs(GCParPhases phase, uint worker_id, double secs) {
+  _gc_par_phases[phase]->add(worker_id, secs);
 }
 
-void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_i, double secs) {
-  if (_gc_par_phases[phase]->get(worker_i) == _gc_par_phases[phase]->uninitialized()) {
-    record_time_secs(phase, worker_i, secs);
+void G1GCPhaseTimes::record_or_add_time_secs(GCParPhases phase, uint worker_id, double secs) {
+  if (_gc_par_phases[phase]->get(worker_id) == _gc_par_phases[phase]->uninitialized()) {
+    record_time_secs(phase, worker_id, secs);
   } else {
-    add_time_secs(phase, worker_i, secs);
+    add_time_secs(phase, worker_id, secs);
   }
 }
 
-double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_i) {
-  return _gc_par_phases[phase]->get(worker_i);
+double G1GCPhaseTimes::get_time_secs(GCParPhases phase, uint worker_id) {
+  return _gc_par_phases[phase]->get(worker_id);
 }
 
-void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
-  _gc_par_phases[phase]->set_thread_work_item(worker_i, count, index);
+void G1GCPhaseTimes::record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
+  _gc_par_phases[phase]->set_thread_work_item(worker_id, count, index);
 }
 
-void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index) {
-  _gc_par_phases[phase]->set_or_add_thread_work_item(worker_i, count, index);
+void G1GCPhaseTimes::record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index) {
+  _gc_par_phases[phase]->set_or_add_thread_work_item(worker_id, count, index);
 }
 
-size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_i, uint index) {
-  return _gc_par_phases[phase]->get_thread_work_item(worker_i, index);
+size_t G1GCPhaseTimes::get_thread_work_item(GCParPhases phase, uint worker_id, uint index) {
+  return _gc_par_phases[phase]->get_thread_work_item(worker_id, index);
 }
 
 // return the average time for a phase in milliseconds
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -250,20 +250,20 @@
   static const char* phase_name(GCParPhases phase);
 
   // record the time a phase took in seconds
-  void record_time_secs(GCParPhases phase, uint worker_i, double secs);
+  void record_time_secs(GCParPhases phase, uint worker_id, double secs);
 
   // add a number of seconds to a phase
-  void add_time_secs(GCParPhases phase, uint worker_i, double secs);
+  void add_time_secs(GCParPhases phase, uint worker_id, double secs);
 
-  void record_or_add_time_secs(GCParPhases phase, uint worker_i, double secs);
+  void record_or_add_time_secs(GCParPhases phase, uint worker_id, double secs);
 
-  double get_time_secs(GCParPhases phase, uint worker_i);
+  double get_time_secs(GCParPhases phase, uint worker_id);
 
-  void record_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
+  void record_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index = 0);
 
-  void record_or_add_thread_work_item(GCParPhases phase, uint worker_i, size_t count, uint index = 0);
+  void record_or_add_thread_work_item(GCParPhases phase, uint worker_id, size_t count, uint index = 0);
 
-  size_t get_thread_work_item(GCParPhases phase, uint worker_i, uint index = 0);
+  size_t get_thread_work_item(GCParPhases phase, uint worker_id, uint index = 0);
 
   // return the average time for a phase in milliseconds
   double average_time_ms(GCParPhases phase);
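A hedged usage sketch of this per-worker API follows (HotSpot-internal, so not standalone; the times and count are invented, but ThreadRoots is one of the GCParPhases values touched by this change). Each worker writes only its own worker_id slot, and the record_or_add_* variants cover phases a worker may enter more than once per pause:

void example(G1GCPhaseTimes* pt, uint worker_id) {
  // First visit to the phase in this pause: record an absolute time.
  pt->record_time_secs(G1GCPhaseTimes::ThreadRoots, worker_id, 0.0042);
  // Later visits accumulate into the same per-worker slot.
  pt->add_time_secs(G1GCPhaseTimes::ThreadRoots, worker_id, 0.0007);
  // Work items (counts rather than times) are also tracked per worker.
  pt->record_or_add_thread_work_item(G1GCPhaseTimes::ThreadRoots, worker_id, 17);
}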
--- a/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.cpp	Mon Sep 23 11:37:08 2019 +0200
@@ -84,7 +84,7 @@
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
 
-void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_i) {
+void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
   assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");
 
   assert(_hot_cache != NULL, "Logic");
@@ -99,7 +99,7 @@
     for (size_t i = start_idx; i < end_idx; i++) {
       CardValue* card_ptr = _hot_cache[i];
       if (card_ptr != NULL) {
-        cl->do_card_ptr(card_ptr, worker_i);
+        cl->do_card_ptr(card_ptr, worker_id);
       } else {
         break;
       }
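The loop above walks a claimed [start_idx, end_idx) slice of the cache, which lets several workers drain it in parallel. A generic model of that chunk-claiming pattern, with invented names and chunk size rather than the actual G1HotCardCache fields:

#include <algorithm>
#include <atomic>
#include <cstddef>

struct ChunkedCache {
  void** _cache = nullptr;               // hypothetical card array
  size_t _size  = 0;
  std::atomic<size_t> _claimed{0};
  static const size_t chunk_size = 64;   // invented

  template <typename DoEntry>
  void drain(DoEntry do_entry, unsigned worker_id) {
    for (;;) {
      // Atomically claim the next chunk; no two workers see the same range.
      size_t start = _claimed.fetch_add(chunk_size);
      if (start >= _size) return;        // nothing left to claim
      size_t end = std::min(start + chunk_size, _size);
      for (size_t i = start; i < end; i++) {
        if (_cache[i] != nullptr) {
          do_entry(_cache[i], worker_id);
        }
      }
    }
  }
};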
--- a/src/hotspot/share/gc/g1/g1HotCardCache.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1HotCardCache.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -113,7 +113,7 @@
 
  // Refine the cards that have been delayed as a result of
  // being in the cache.
-  void drain(G1CardTableEntryClosure* cl, uint worker_i);
+  void drain(G1CardTableEntryClosure* cl, uint worker_id);
 
   // Set up for parallel processing of the cards in the hot cache
   void reset_hot_cache_claimed_index() {
--- a/src/hotspot/share/gc/g1/g1OopClosures.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -209,12 +209,12 @@
 
 class G1ConcurrentRefineOopClosure: public BasicOopIterateClosure {
   G1CollectedHeap* _g1h;
-  uint _worker_i;
+  uint _worker_id;
 
 public:
-  G1ConcurrentRefineOopClosure(G1CollectedHeap* g1h, uint worker_i) :
+  G1ConcurrentRefineOopClosure(G1CollectedHeap* g1h, uint worker_id) :
     _g1h(g1h),
-    _worker_i(worker_i) {
+    _worker_id(worker_id) {
   }
 
   virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
--- a/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1OopClosures.inline.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -156,7 +156,7 @@
 
   assert(to_rem_set != NULL, "Need per-region 'into' remsets.");
   if (to_rem_set->is_tracked()) {
-    to_rem_set->add_reference(p, _worker_i);
+    to_rem_set->add_reference(p, _worker_id);
   }
 }
 
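The worker id passed to add_reference() is not just a label: it selects per-worker state inside the remembered set code, such as a per-worker cache of recently inserted cards that filters duplicate references without synchronization. A toy version of that idea, with invented names and dimensions:

#include <cstddef>
#include <cstdint>

// Toy per-worker duplicate filter in the spirit of G1's from-card cache;
// the sizes and field names are illustrative only.
struct FromCardFilter {
  static const size_t num_workers = 8;    // hypothetical
  static const size_t num_regions = 256;  // hypothetical
  uintptr_t _last_card[num_workers][num_regions] = {};

  // True if this worker has not just inserted the same card for the region.
  bool should_insert(unsigned worker_id, size_t region, uintptr_t card) {
    if (_last_card[worker_id][region] == card) {
      return false;                       // duplicate of the last card seen
    }
    _last_card[worker_id][region] = card; // remember for next time
    return true;
  }
};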
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Mon Sep 23 11:37:08 2019 +0200
@@ -839,14 +839,14 @@
 public:
   G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state,
                                    G1ParScanThreadState* pss,
-                                   uint worker_i,
+                                   uint worker_id,
                                    G1GCPhaseTimes::GCParPhases scan_phase,
                                    G1GCPhaseTimes::GCParPhases code_roots_phase) :
     _pss(pss),
     _scan_state(scan_state),
     _scan_phase(scan_phase),
     _code_roots_phase(code_roots_phase),
-    _worker_id(worker_i),
+    _worker_id(worker_id),
     _opt_refs_scanned(0),
     _opt_refs_memory_used(0),
     _strong_code_root_scan_time(),
@@ -1061,7 +1061,7 @@
       _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
     {}
 
-    void do_card_ptr(CardValue* card_ptr, uint worker_i) {
+    void do_card_ptr(CardValue* card_ptr, uint worker_id) {
       // The only time we care about recording cards that
       // contain references that point into the collection set
       // is during RSet updating within an evacuation pause.
@@ -1263,7 +1263,7 @@
 }
 
 void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
-                                        uint worker_i) {
+                                        uint worker_id) {
   assert(!_g1h->is_gc_active(), "Only call concurrently");
 
   // Construct the region representing the card.
@@ -1375,7 +1375,7 @@
   MemRegion dirty_region(start, MIN2(scan_limit, end));
   assert(!dirty_region.is_empty(), "sanity");
 
-  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);
+  G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id);
   if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
     _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
     return;
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -117,7 +117,7 @@
  // Refine the card corresponding to "card_ptr". Safe to be called concurrently
  // with the mutator.
   void refine_card_concurrently(CardValue* card_ptr,
-                                uint worker_i);
+                                uint worker_id);
 
   // Print accumulated summary info from the start of the VM.
   void print_summary_info();
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp	Mon Sep 23 11:37:08 2019 +0200
@@ -74,19 +74,19 @@
     _lock(Mutex::leaf, "G1 Root Scan barrier lock", false, Monitor::_safepoint_check_never),
     _n_workers_discovered_strong_classes(0) {}
 
-void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_i) {
+void G1RootProcessor::evacuate_roots(G1ParScanThreadState* pss, uint worker_id) {
   G1GCPhaseTimes* phase_times = _g1h->phase_times();
 
-  G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_i);
+  G1EvacPhaseTimesTracker timer(phase_times, pss, G1GCPhaseTimes::ExtRootScan, worker_id);
 
   G1EvacuationRootClosures* closures = pss->closures();
-  process_java_roots(closures, phase_times, worker_i, closures->trace_metadata() /* notify_claimed_nmethods_done */);
+  process_java_roots(closures, phase_times, worker_id, closures->trace_metadata() /* notify_claimed_nmethods_done */);
 
-  process_vm_roots(closures, phase_times, worker_i);
+  process_vm_roots(closures, phase_times, worker_id);
 
   {
     // Now the CM ref_processor roots.
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_refProcessor_oops_do)) {
       // We need to treat the discovered reference lists of the
       // concurrent mark ref processor as roots and keep entries
@@ -97,7 +97,7 @@
   }
 
   if (closures->trace_metadata()) {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongRoots, worker_id);
     // Wait to make sure all workers passed the strong nmethods phase.
     wait_until_all_strong_nmethods_discovered();
   }
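The wait in WaitForStrongRoots pairs with the _lock and _n_workers_discovered_strong_classes members visible at the top of this hunk: each worker announces when it has finished discovering strong nmethods, and trace_metadata workers block until all have. A minimal sketch of that rendezvous using standard C++ primitives instead of HotSpot's Monitor (names invented):

#include <condition_variable>
#include <mutex>

class StrongRootsBarrier {
  std::mutex _lock;
  std::condition_variable _cv;
  unsigned _done = 0;
  const unsigned _n_workers;
public:
  explicit StrongRootsBarrier(unsigned n) : _n_workers(n) {}

  // Called by each worker after its strong-nmethod discovery phase.
  void notify_done() {
    std::lock_guard<std::mutex> g(_lock);
    if (++_done == _n_workers) _cv.notify_all();
  }

  // Plays the role of wait_until_all_strong_nmethods_discovered().
  void wait_all_done() {
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [this] { return _done == _n_workers; });
  }
};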
@@ -171,7 +171,7 @@
 
 void G1RootProcessor::process_java_roots(G1RootClosures* closures,
                                          G1GCPhaseTimes* phase_times,
-                                         uint worker_i,
+                                         uint worker_id,
                                          bool notify_claimed_nmethods_done) {
  // We need to make sure that the "strong" nmethods are processed first
   // using the strong closure. Only after that we process the weakly reachable
@@ -190,7 +190,7 @@
   //
   // This is only required in the concurrent start pause with class unloading enabled.
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_id);
     bool is_par = n_workers() > 1;
     Threads::possibly_parallel_oops_do(is_par,
                                        closures->strong_oops(),
@@ -204,7 +204,7 @@
   }
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
       ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
     }
@@ -213,39 +213,39 @@
 
 void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
                                        G1GCPhaseTimes* phase_times,
-                                       uint worker_i) {
+                                       uint worker_id) {
   OopClosure* strong_roots = closures->strong_oops();
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_Universe_oops_do)) {
       Universe::oops_do(strong_roots);
     }
   }
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_JNIHandles_oops_do)) {
       JNIHandles::oops_do(strong_roots);
     }
   }
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_ObjectSynchronizer_oops_do)) {
       ObjectSynchronizer::oops_do(strong_roots);
     }
   }
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_Management_oops_do)) {
       Management::oops_do(strong_roots);
     }
   }
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_jvmti_oops_do)) {
       JvmtiExport::oops_do(strong_roots);
     }
@@ -253,7 +253,7 @@
 
 #if INCLUDE_AOT
   if (UseAOT) {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
         AOTLoader::oops_do(strong_roots);
     }
@@ -261,7 +261,7 @@
 #endif
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
+    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_id);
     if (_process_strong_tasks.try_claim_task(G1RP_PS_SystemDictionary_oops_do)) {
       SystemDictionary::oops_do(strong_roots);
     }
@@ -270,7 +270,7 @@
 
 void G1RootProcessor::process_code_cache_roots(CodeBlobClosure* code_closure,
                                                G1GCPhaseTimes* phase_times,
-                                               uint worker_i) {
+                                               uint worker_id) {
   if (_process_strong_tasks.try_claim_task(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(code_closure);
   }
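Every root group in this file is guarded by the same _process_strong_tasks.try_claim_task(...) idiom: the first worker to claim a task executes it and all others skip it, so each singleton root set is processed exactly once per pause. A generic model of that claim (HotSpot's SubTasksDone is more elaborate; this is only an illustration):

#include <atomic>

class TaskClaimer {                      // invented stand-in for SubTasksDone
  std::atomic<bool> _claimed[16];
public:
  TaskClaimer() {
    for (auto& c : _claimed) c.store(false);
  }
  // The first caller to flip the flag wins the task; later callers get false.
  bool try_claim_task(int task) {
    return !_claimed[task].exchange(true);
  }
};

Because the claim is a single atomic exchange, workers that lose the race move straight on to the next root group without blocking.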
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp	Mon Sep 23 11:37:02 2019 +0200
+++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp	Mon Sep 23 11:37:08 2019 +0200
@@ -74,16 +74,16 @@
 
   void process_java_roots(G1RootClosures* closures,
                           G1GCPhaseTimes* phase_times,
-                          uint worker_i,
+                          uint worker_id,
                           bool notify_claimed_nmethods_done = false);
 
   void process_vm_roots(G1RootClosures* closures,
                         G1GCPhaseTimes* phase_times,
-                        uint worker_i);
+                        uint worker_id);
 
   void process_code_cache_roots(CodeBlobClosure* code_closure,
                                 G1GCPhaseTimes* phase_times,
-                                uint worker_i);
+                                uint worker_id);
 
 public:
   G1RootProcessor(G1CollectedHeap* g1h, uint n_workers);