7121618: Change type of number of GC workers to unsigned int.
author jmasa
date Wed, 14 Dec 2011 13:34:57 -0800
changeset 11396 917d8673b5ef
parent 11395 33260c27554b
child 11397 7dc5c8f572b0
7121618: Change type of number of GC workers to unsigned int.
Summary: Change variables representing the number of GC workers to uint from int and size_t. Change the parameter in work(int i) to work(uint worker_id).
Reviewed-by: brutisso, tonyp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp
hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp
hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp
hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp
hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp
hotspot/src/share/vm/gc_interface/collectedHeap.hpp
hotspot/src/share/vm/memory/genCollectedHeap.cpp
hotspot/src/share/vm/memory/genCollectedHeap.hpp
hotspot/src/share/vm/memory/referenceProcessor.cpp
hotspot/src/share/vm/memory/referenceProcessor.hpp
hotspot/src/share/vm/memory/sharedHeap.cpp
hotspot/src/share/vm/memory/sharedHeap.hpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/utilities/workgroup.cpp
hotspot/src/share/vm/utilities/workgroup.hpp
hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp
hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp
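
The shape of the change, as a minimal standalone C++ sketch (GangTask and CountingTask below are hypothetical stand-ins, not HotSpot's actual AbstractGangTask API):

// Minimal sketch of the signature change, assuming a simplified gang-task
// interface; GangTask and CountingTask are illustrative stand-ins only.
#include <cassert>
#include <cstdio>

typedef unsigned int uint;

class GangTask {
public:
  // Before this change: virtual void work(int i);
  // Worker ids are never negative, so an unsigned type is the natural fit.
  virtual void work(uint worker_id) = 0;
  virtual ~GangTask() {}
};

class CountingTask : public GangTask {
  uint _n_workers;  // previously declared as int or size_t in various classes
public:
  explicit CountingTask(uint n_workers) : _n_workers(n_workers) {}
  virtual void work(uint worker_id) {
    // With matching unsigned types, no (ssize_t)/(size_t) casts are needed.
    assert(worker_id < _n_workers && "worker_id out of range");
    printf("worker %u of %u\n", worker_id, _n_workers);
  }
};

int main() {
  CountingTask task(4);
  for (uint i = 0; i < 4; ++i) {
    task.work(i);  // callers pass the worker's id, renamed from i to worker_id
  }
  return 0;
}
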
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -2598,7 +2598,7 @@
 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
-int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
+uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
 
 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
   _cfls(cfls)
@@ -2732,7 +2732,7 @@
         // Update globals stats for num_blocks used
         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
         _global_num_workers[i]++;
-        assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
+        assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
         if (num_retire > 0) {
           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
           // Reset this list.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -631,7 +631,7 @@
   static AdaptiveWeightedAverage
                  _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
   static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
-  static int    _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
+  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
   size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
 
   // Internal work method
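
Why the (ssize_t) cast in the assert above can be dropped: once _global_num_workers is unsigned, the comparison against ParallelGCThreads involves no signed/unsigned mismatch. A minimal sketch, assuming ParallelGCThreads is a uintx flag (modeled below as a plain variable):

#include <cassert>

typedef unsigned int  uint;
typedef unsigned long uintx;  // assumed model of HotSpot's uintx flag type

static uintx ParallelGCThreads = 4;   // stand-in for the real VM flag
static uint  _global_num_workers[1];  // was int before this change

int main() {
  _global_num_workers[0]++;
  // Before: assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, ...);
  // After:  both operands are unsigned, so the comparison needs no cast.
  assert(_global_num_workers[0] <= ParallelGCThreads && "Too big");
  return 0;
}
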
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -3779,7 +3779,7 @@
     terminator()->reset_for_reuse(active_workers);
   }
 
-  void work(int i);
+  void work(uint worker_id);
   bool should_yield() {
     return    ConcurrentMarkSweepThread::should_yield()
            && !_collector->foregroundGCIsActive()
@@ -3852,7 +3852,7 @@
 //    . if neither is available, offer termination
 // -- Terminate and return result
 //
-void CMSConcMarkingTask::work(int i) {
+void CMSConcMarkingTask::work(uint worker_id) {
   elapsedTimer _timer;
   ResourceMark rm;
   HandleMark hm;
@@ -3860,37 +3860,40 @@
   DEBUG_ONLY(_collector->verify_overflow_empty();)
 
   // Before we begin work, our work queue should be empty
-  assert(work_queue(i)->size() == 0, "Expected to be empty");
+  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
   // Scan the bitmap covering _cms_space, tracing through grey objects.
   _timer.start();
-  do_scan_and_mark(i, _cms_space);
+  do_scan_and_mark(worker_id, _cms_space);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr("Finished cms space scanning in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+      worker_id, _timer.seconds());
+      // XXX: need xxx/xxx type of notation, two timers
   }
 
   // ... do the same for the _perm_space
   _timer.reset();
   _timer.start();
-  do_scan_and_mark(i, _perm_space);
+  do_scan_and_mark(worker_id, _perm_space);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr("Finished perm space scanning in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+      worker_id, _timer.seconds());
+      // XXX: need xxx/xxx type of notation, two timers
   }
 
   // ... do work stealing
   _timer.reset();
   _timer.start();
-  do_work_steal(i);
+  do_work_steal(worker_id);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr("Finished work stealing in %dth thread: %3.3f sec",
-      i, _timer.seconds()); // XXX: need xxx/xxx type of notation, two timers
+      worker_id, _timer.seconds());
+      // XXX: need xxx/xxx type of notation, two timers
   }
   assert(_collector->_markStack.isEmpty(), "Should have been emptied");
-  assert(work_queue(i)->size() == 0, "Should have been emptied");
+  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
   // Note that under the current task protocol, the
   // following assertion is true even of the spaces
   // expanded since the completion of the concurrent
@@ -3946,7 +3949,7 @@
   // We allow that there may be no tasks to do here because
   // we are restarting after a stack overflow.
   assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
-  int nth_task = 0;
+  uint nth_task = 0;
 
   HeapWord* aligned_start = sp->bottom();
   if (sp->used_region().contains(_restart_addr)) {
@@ -5075,7 +5078,7 @@
   ParallelTaskTerminator* terminator() { return &_term; }
   int n_workers() { return _n_workers; }
 
-  void work(int i);
+  void work(uint worker_id);
 
  private:
   // Work method in support of parallel rescan ... of young gen spaces
@@ -5096,7 +5099,7 @@
 // also is passed to do_dirty_card_rescan_tasks() and to
 // do_work_steal() to select the i-th task_queue.
 
-void CMSParRemarkTask::work(int i) {
+void CMSParRemarkTask::work(uint worker_id) {
   elapsedTimer _timer;
   ResourceMark rm;
   HandleMark   hm;
@@ -5107,7 +5110,7 @@
   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
     _collector->_span, _collector->ref_processor(),
     &(_collector->_markBitMap),
-    work_queue(i), &(_collector->_revisitStack));
+    work_queue(worker_id), &(_collector->_revisitStack));
 
   // Rescan young gen roots first since these are likely
   // coarsely partitioned and may, on that account, constitute
@@ -5128,15 +5131,15 @@
     assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
     assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
 
-    do_young_space_rescan(i, &par_mrias_cl, to_space, NULL, 0);
-    do_young_space_rescan(i, &par_mrias_cl, from_space, sca, sct);
-    do_young_space_rescan(i, &par_mrias_cl, eden_space, eca, ect);
+    do_young_space_rescan(worker_id, &par_mrias_cl, to_space, NULL, 0);
+    do_young_space_rescan(worker_id, &par_mrias_cl, from_space, sca, sct);
+    do_young_space_rescan(worker_id, &par_mrias_cl, eden_space, eca, ect);
 
     _timer.stop();
     if (PrintCMSStatistics != 0) {
       gclog_or_tty->print_cr(
         "Finished young gen rescan work in %dth thread: %3.3f sec",
-        i, _timer.seconds());
+        worker_id, _timer.seconds());
     }
   }
 
@@ -5158,7 +5161,7 @@
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished remaining root rescan work in %dth thread: %3.3f sec",
-      i, _timer.seconds());
+      worker_id, _timer.seconds());
   }
 
   // ---------- rescan dirty cards ------------
@@ -5167,26 +5170,26 @@
 
   // Do the rescan tasks for each of the two spaces
   // (cms_space and perm_space) in turn.
-  // "i" is passed to select the "i-th" task_queue
-  do_dirty_card_rescan_tasks(_cms_space, i, &par_mrias_cl);
-  do_dirty_card_rescan_tasks(_perm_space, i, &par_mrias_cl);
+  // "worker_id" is passed to select the task_queue for "worker_id"
+  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
+  do_dirty_card_rescan_tasks(_perm_space, worker_id, &par_mrias_cl);
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished dirty card rescan work in %dth thread: %3.3f sec",
-      i, _timer.seconds());
+      worker_id, _timer.seconds());
   }
 
   // ---------- steal work from other threads ...
   // ---------- ... and drain overflow list.
   _timer.reset();
   _timer.start();
-  do_work_steal(i, &par_mrias_cl, _collector->hash_seed(i));
+  do_work_steal(worker_id, &par_mrias_cl, _collector->hash_seed(worker_id));
   _timer.stop();
   if (PrintCMSStatistics != 0) {
     gclog_or_tty->print_cr(
       "Finished work stealing in %dth thread: %3.3f sec",
-      i, _timer.seconds());
+      worker_id, _timer.seconds());
   }
 }
 
@@ -5207,8 +5210,8 @@
   SequentialSubTasksDone* pst = space->par_seq_tasks();
   assert(pst->valid(), "Uninitialized use?");
 
-  int nth_task = 0;
-  int n_tasks  = pst->n_tasks();
+  uint nth_task = 0;
+  uint n_tasks  = pst->n_tasks();
 
   HeapWord *start, *end;
   while (!pst->is_task_claimed(/* reference */ nth_task)) {
@@ -5220,12 +5223,12 @@
     } else if (nth_task == 0) {
       start = space->bottom();
       end   = chunk_array[nth_task];
-    } else if (nth_task < (jint)chunk_top) {
+    } else if (nth_task < (uint)chunk_top) {
       assert(nth_task >= 1, "Control point invariant");
       start = chunk_array[nth_task - 1];
       end   = chunk_array[nth_task];
     } else {
-      assert(nth_task == (jint)chunk_top, "Control point invariant");
+      assert(nth_task == (uint)chunk_top, "Control point invariant");
       start = chunk_array[chunk_top - 1];
       end   = space->top();
     }
@@ -5288,7 +5291,7 @@
 
   SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
   assert(pst->valid(), "Uninitialized use?");
-  int nth_task = 0;
+  uint nth_task = 0;
   const int alignment = CardTableModRefBS::card_size * BitsPerWord;
   MemRegion span = sp->used_region();
   HeapWord* start_addr = span.start();
@@ -5736,26 +5739,26 @@
                      CMSParKeepAliveClosure* keep_alive,
                      int* seed);
 
-  virtual void work(int i);
+  virtual void work(uint worker_id);
 };
 
-void CMSRefProcTaskProxy::work(int i) {
+void CMSRefProcTaskProxy::work(uint worker_id) {
   assert(_collector->_span.equals(_span), "Inconsistency in _span");
   CMSParKeepAliveClosure par_keep_alive(_collector, _span,
                                         _mark_bit_map,
                                         &_collector->_revisitStack,
-                                        work_queue(i));
+                                        work_queue(worker_id));
   CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
                                                  _mark_bit_map,
                                                  &_collector->_revisitStack,
-                                                 work_queue(i));
+                                                 work_queue(worker_id));
   CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
-  _task.work(i, is_alive_closure, par_keep_alive, par_drain_stack);
+  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
   if (_task.marks_oops_alive()) {
-    do_work_steal(i, &par_drain_stack, &par_keep_alive,
-                  _collector->hash_seed(i));
-  }
-  assert(work_queue(i)->size() == 0, "work_queue should be empty");
+    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive,
+                  _collector->hash_seed(worker_id));
+  }
+  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
   assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
 }
 
@@ -5769,9 +5772,9 @@
       _task(task)
   { }
 
-  virtual void work(int i)
+  virtual void work(uint worker_id)
   {
-    _task.work(i);
+    _task.work(worker_id);
   }
 };
 
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -264,7 +264,7 @@
     // or some improperly initialized variable which leads to no
     // active threads, protect against that in a product build.
     n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
-                     1);
+                     1U);
   }
   size_t max_waste = n_threads * chunkSize;
   // it should be aligned with respect to chunkSize
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -458,8 +458,8 @@
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 #endif // _MSC_VER
 
-size_t ConcurrentMark::scale_parallel_threads(size_t n_par_threads) {
-  return MAX2((n_par_threads + 2) / 4, (size_t)1);
+uint ConcurrentMark::scale_parallel_threads(uint n_par_threads) {
+  return MAX2((n_par_threads + 2) / 4, 1U);
 }
 
 ConcurrentMark::ConcurrentMark(ReservedSpace rs,
@@ -486,7 +486,7 @@
   _regionStack(),
   // _finger set in set_non_marking_state
 
-  _max_task_num(MAX2(ParallelGCThreads, (size_t)1)),
+  _max_task_num(MAX2((uint)ParallelGCThreads, 1U)),
   // _active_tasks set in set_non_marking_state
   // _tasks set inside the constructor
   _task_queues(new CMTaskQueueSet((int) _max_task_num)),
@@ -506,7 +506,6 @@
   _cleanup_times(),
   _total_counting_time(0.0),
   _total_rs_scrub_time(0.0),
-
   _parallel_workers(NULL) {
   CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
   if (verbose_level < no_verbose) {
@@ -568,7 +567,7 @@
       // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
       // if both are set
 
-      _parallel_marking_threads = ConcGCThreads;
+      _parallel_marking_threads = (uint) ConcGCThreads;
       _max_parallel_marking_threads = _parallel_marking_threads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
@@ -589,12 +588,12 @@
       double sleep_factor =
                          (1.0 - marking_task_overhead) / marking_task_overhead;
 
-      _parallel_marking_threads = (size_t) marking_thread_num;
+      _parallel_marking_threads = (uint) marking_thread_num;
       _max_parallel_marking_threads = _parallel_marking_threads;
       _sleep_factor             = sleep_factor;
       _marking_task_overhead    = marking_task_overhead;
     } else {
-      _parallel_marking_threads = scale_parallel_threads(ParallelGCThreads);
+      _parallel_marking_threads = scale_parallel_threads((uint)ParallelGCThreads);
       _max_parallel_marking_threads = _parallel_marking_threads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
@@ -618,7 +617,7 @@
 
     guarantee(parallel_marking_threads() > 0, "peace of mind");
     _parallel_workers = new FlexibleWorkGang("G1 Parallel Marking Threads",
-         (int) _max_parallel_marking_threads, false, true);
+         _max_parallel_marking_threads, false, true);
     if (_parallel_workers == NULL) {
       vm_exit_during_initialization("Failed necessary allocation.");
     } else {
@@ -691,7 +690,7 @@
   set_concurrent_marking_in_progress();
 }
 
-void ConcurrentMark::set_phase(size_t active_tasks, bool concurrent) {
+void ConcurrentMark::set_phase(uint active_tasks, bool concurrent) {
   assert(active_tasks <= _max_task_num, "we should not have more");
 
   _active_tasks = active_tasks;
@@ -1048,7 +1047,7 @@
   ConcurrentMarkThread* _cmt;
 
 public:
-  void work(int worker_i) {
+  void work(uint worker_id) {
     assert(Thread::current()->is_ConcurrentGC_thread(),
            "this should only be done by a conc GC thread");
     ResourceMark rm;
@@ -1057,8 +1056,8 @@
 
     ConcurrentGCThread::stsJoin();
 
-    assert((size_t) worker_i < _cm->active_tasks(), "invariant");
-    CMTask* the_task = _cm->task(worker_i);
+    assert(worker_id < _cm->active_tasks(), "invariant");
+    CMTask* the_task = _cm->task(worker_id);
     the_task->record_start_time();
     if (!_cm->has_aborted()) {
       do {
@@ -1076,7 +1075,7 @@
         double elapsed_time_sec = end_time_sec - start_time_sec;
         _cm->clear_has_overflown();
 
-        bool ret = _cm->do_yield_check(worker_i);
+        bool ret = _cm->do_yield_check(worker_id);
 
         jlong sleep_time_ms;
         if (!_cm->has_aborted() && the_task->has_aborted()) {
@@ -1105,7 +1104,7 @@
     ConcurrentGCThread::stsLeave();
 
     double end_vtime = os::elapsedVTime();
-    _cm->update_accum_task_vtime(worker_i, end_vtime - start_vtime);
+    _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
   }
 
   CMConcurrentMarkingTask(ConcurrentMark* cm,
@@ -1117,9 +1116,9 @@
 
 // Calculates the number of active workers for a concurrent
 // phase.
-size_t ConcurrentMark::calc_parallel_marking_threads() {
+uint ConcurrentMark::calc_parallel_marking_threads() {
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    size_t n_conc_workers = 0;
+    uint n_conc_workers = 0;
     if (!UseDynamicNumberOfGCThreads ||
         (!FLAG_IS_DEFAULT(ConcGCThreads) &&
          !ForceDynamicNumberOfGCThreads)) {
@@ -1159,7 +1158,7 @@
   assert(parallel_marking_threads() <= max_parallel_marking_threads(),
     "Maximum number of marking threads exceeded");
 
-  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+  uint active_workers = MAX2(1U, parallel_marking_threads());
 
   // Parallel task terminator is set in "set_phase()"
   set_phase(active_workers, true /* concurrent */);
@@ -1503,7 +1502,7 @@
 protected:
   G1CollectedHeap* _g1h;
   CMBitMap* _bm;
-  size_t _n_workers;
+  uint    _n_workers;
   size_t *_live_bytes;
   size_t *_used_bytes;
   BitMap* _region_bm;
@@ -1535,13 +1534,13 @@
     FREE_C_HEAP_ARRAY(size_t, _used_bytes);
   }
 
-  void work(int i) {
+  void work(uint worker_id) {
     CalcLiveObjectsClosure calccl(true /*final*/,
                                   _bm, _g1h->concurrent_mark(),
                                   _region_bm, _card_bm);
     calccl.no_yield();
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&calccl, i,
+      _g1h->heap_region_par_iterate_chunked(&calccl, worker_id,
                                             (int) _n_workers,
                                             HeapRegion::FinalCountClaimValue);
     } else {
@@ -1549,19 +1548,19 @@
     }
     assert(calccl.complete(), "Shouldn't have yielded!");
 
-    assert((size_t) i < _n_workers, "invariant");
-    _live_bytes[i] = calccl.tot_live();
-    _used_bytes[i] = calccl.tot_used();
+    assert(worker_id < _n_workers, "invariant");
+    _live_bytes[worker_id] = calccl.tot_live();
+    _used_bytes[worker_id] = calccl.tot_used();
   }
   size_t live_bytes()  {
     size_t live_bytes = 0;
-    for (size_t i = 0; i < _n_workers; ++i)
+    for (uint i = 0; i < _n_workers; ++i)
       live_bytes += _live_bytes[i];
     return live_bytes;
   }
   size_t used_bytes()  {
     size_t used_bytes = 0;
-    for (size_t i = 0; i < _n_workers; ++i)
+    for (uint i = 0; i < _n_workers; ++i)
       used_bytes += _used_bytes[i];
     return used_bytes;
   }
@@ -1646,18 +1645,18 @@
     AbstractGangTask("G1 note end"), _g1h(g1h),
     _max_live_bytes(0), _freed_bytes(0), _cleanup_list(cleanup_list) { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     double start = os::elapsedTime();
     FreeRegionList local_cleanup_list("Local Cleanup List");
     OldRegionSet old_proxy_set("Local Cleanup Old Proxy Set");
     HumongousRegionSet humongous_proxy_set("Local Cleanup Humongous Proxy Set");
     HRRSCleanupTask hrrs_cleanup_task;
-    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, i, &local_cleanup_list,
+    G1NoteEndOfConcMarkClosure g1_note_end(_g1h, worker_id, &local_cleanup_list,
                                            &old_proxy_set,
                                            &humongous_proxy_set,
                                            &hrrs_cleanup_task);
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1h->heap_region_par_iterate_chunked(&g1_note_end, i,
+      _g1h->heap_region_par_iterate_chunked(&g1_note_end, worker_id,
                                             _g1h->workers()->active_workers(),
                                             HeapRegion::NoteEndClaimValue);
     } else {
@@ -1701,8 +1700,8 @@
     double end = os::elapsedTime();
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print("     Worker thread %d [%8.3f..%8.3f = %8.3f ms] "
-                          "claimed %d regions (tot = %8.3f ms, max = %8.3f ms).\n",
-                          i, start, end, (end-start)*1000.0,
+                          "claimed %u regions (tot = %8.3f ms, max = %8.3f ms).\n",
+                          worker_id, start, end, (end-start)*1000.0,
                           g1_note_end.regions_claimed(),
                           g1_note_end.claimed_region_time_sec()*1000.0,
                           g1_note_end.max_region_time_sec()*1000.0);
@@ -1724,9 +1723,9 @@
     _region_bm(region_bm), _card_bm(card_bm)
   {}
 
-  void work(int i) {
+  void work(uint worker_id) {
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      _g1rs->scrub_par(_region_bm, _card_bm, i,
+      _g1rs->scrub_par(_region_bm, _card_bm, worker_id,
                        HeapRegion::ScrubRemSetClaimValue);
     } else {
       _g1rs->scrub(_region_bm, _card_bm);
@@ -1766,7 +1765,7 @@
 
   HeapRegionRemSet::reset_for_cleanup_tasks();
 
-  size_t n_workers;
+  uint n_workers;
 
   // Do counting once more with the world stopped for good measure.
   G1ParFinalCountTask g1_par_count_task(g1h, nextMarkBitMap(),
@@ -1778,7 +1777,7 @@
 
     g1h->set_par_threads();
     n_workers = g1h->n_par_threads();
-    assert(g1h->n_par_threads() == (int) n_workers,
+    assert(g1h->n_par_threads() == n_workers,
            "Should not have been reset");
     g1h->workers()->run_task(&g1_par_count_task);
     // Done with the parallel phase so reset to 0.
@@ -2169,13 +2168,13 @@
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
 
-  virtual void work(int i) {
-    CMTask* marking_task = _cm->task(i);
+  virtual void work(uint worker_id) {
+    CMTask* marking_task = _cm->task(worker_id);
     G1CMIsAliveClosure g1_is_alive(_g1h);
     G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
     G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
 
-    _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
+    _proc_task.work(worker_id, g1_is_alive, g1_par_keep_alive, g1_par_drain);
   }
 };
 
@@ -2201,8 +2200,8 @@
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task) { }
 
-  virtual void work(int i) {
-    _enq_task.work(i);
+  virtual void work(uint worker_id) {
+    _enq_task.work(worker_id);
   }
 };
 
@@ -2249,8 +2248,8 @@
 
     // We use the work gang from the G1CollectedHeap and we utilize all
     // the worker threads.
-    int active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1;
-    active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
+    uint active_workers = g1h->workers() ? g1h->workers()->active_workers() : 1U;
+    active_workers = MAX2(MIN2(active_workers, _max_task_num), 1U);
 
     G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                               g1h->workers(), active_workers);
@@ -2314,11 +2313,11 @@
   ConcurrentMark *_cm;
 
 public:
-  void work(int worker_i) {
+  void work(uint worker_id) {
     // Since all available tasks are actually started, we should
     // only proceed if we're supposed to be active.
-    if ((size_t)worker_i < _cm->active_tasks()) {
-      CMTask* task = _cm->task(worker_i);
+    if (worker_id < _cm->active_tasks()) {
+      CMTask* task = _cm->task(worker_id);
       task->record_start_time();
       do {
         task->do_marking_step(1000000000.0 /* something very large */,
@@ -2347,10 +2346,10 @@
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all active threads
-    int active_workers = g1h->workers()->active_workers();
+    uint active_workers = g1h->workers()->active_workers();
     if (active_workers == 0) {
       assert(active_workers > 0, "Should have been set earlier");
-      active_workers = ParallelGCThreads;
+      active_workers = (uint) ParallelGCThreads;
       g1h->workers()->set_active_workers(active_workers);
     }
     set_phase(active_workers, false /* concurrent */);
@@ -2366,7 +2365,7 @@
   } else {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
-    int active_workers = 1;
+    uint active_workers = 1;
     set_phase(active_workers, false /* concurrent */);
 
     CMRemarkTask remarkTask(this, active_workers);
@@ -2921,7 +2920,7 @@
   int              _ms_size;
   int              _ms_ind;
   int              _array_increment;
-  int              _worker_i;
+  uint             _worker_id;
 
   bool push(oop obj, int arr_ind = 0) {
     if (_ms_ind == _ms_size) {
@@ -2971,7 +2970,7 @@
   }
 
 public:
-  CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
+  CSetMarkOopClosure(ConcurrentMark* cm, int ms_size, uint worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _cm(cm),
     _bm(cm->nextMarkBitMap()),
@@ -2979,7 +2978,7 @@
     _ms(NEW_C_HEAP_ARRAY(oop, ms_size)),
     _array_ind_stack(NEW_C_HEAP_ARRAY(jint, ms_size)),
     _array_increment(MAX2(ms_size/8, 16)),
-    _worker_i(worker_i) { }
+    _worker_id(worker_id) { }
 
   ~CSetMarkOopClosure() {
     FREE_C_HEAP_ARRAY(oop, _ms);
@@ -3024,14 +3023,14 @@
   CMBitMap*          _bitMap;
   ConcurrentMark*    _cm;
   CSetMarkOopClosure _oop_cl;
-  int                _worker_i;
+  uint               _worker_id;
 
 public:
-  CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_i) :
+  CSetMarkBitMapClosure(ConcurrentMark* cm, int ms_size, int worker_id) :
     _g1h(G1CollectedHeap::heap()),
     _bitMap(cm->nextMarkBitMap()),
-    _oop_cl(cm, ms_size, worker_i),
-    _worker_i(worker_i) { }
+    _oop_cl(cm, ms_size, worker_id),
+    _worker_id(worker_id) { }
 
   bool do_bit(size_t offset) {
     // convert offset into a HeapWord*
@@ -3056,17 +3055,17 @@
 class CompleteMarkingInCSetHRClosure: public HeapRegionClosure {
   CMBitMap*             _bm;
   CSetMarkBitMapClosure _bit_cl;
-  int                   _worker_i;
+  uint                  _worker_id;
 
   enum SomePrivateConstants {
     MSSize = 1000
   };
 
 public:
-  CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_i) :
+  CompleteMarkingInCSetHRClosure(ConcurrentMark* cm, int worker_id) :
     _bm(cm->nextMarkBitMap()),
-    _bit_cl(cm, MSSize, worker_i),
-    _worker_i(worker_i) { }
+    _bit_cl(cm, MSSize, worker_id),
+    _worker_id(worker_id) { }
 
   bool doHeapRegion(HeapRegion* hr) {
     if (hr->claimHeapRegion(HeapRegion::CompleteMarkCSetClaimValue)) {
@@ -3109,9 +3108,9 @@
     AbstractGangTask("Complete Mark in CSet"),
     _g1h(g1h), _cm(cm) { }
 
-  void work(int worker_i) {
-    CompleteMarkingInCSetHRClosure cmplt(_cm, worker_i);
-    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_i);
+  void work(uint worker_id) {
+    CompleteMarkingInCSetHRClosure cmplt(_cm, worker_id);
+    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
     _g1h->collection_set_iterate_from(hr, &cmplt);
   }
 };
@@ -3307,13 +3306,13 @@
 // the CMS bit map. Called at the first checkpoint.
 
 // We take a break if someone is trying to stop the world.
-bool ConcurrentMark::do_yield_check(int worker_i) {
+bool ConcurrentMark::do_yield_check(uint worker_id) {
   if (should_yield()) {
-    if (worker_i == 0) {
+    if (worker_id == 0) {
       _g1h->g1_policy()->record_concurrent_pause();
     }
     cmThread()->yield();
-    if (worker_i == 0) {
+    if (worker_id == 0) {
       _g1h->g1_policy()->record_concurrent_pause_end();
     }
     return true;
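
The scale_parallel_threads() hunk earlier in this file computes the number of concurrent marking threads from the STW worker count. A minimal sketch of that arithmetic, with HotSpot's MAX2 macro approximated by std::max:

#include <algorithm>
#include <cstdio>

typedef unsigned int uint;

// Mirrors the uint version of ConcurrentMark::scale_parallel_threads()
// from the hunk above; MAX2 is approximated with std::max.
static uint scale_parallel_threads(uint n_par_threads) {
  return std::max((n_par_threads + 2) / 4, 1U);
}

int main() {
  // e.g. 8 STW GC workers -> (8 + 2) / 4 = 2 concurrent marking threads
  for (uint n = 1; n <= 8; ++n) {
    printf("ParallelGCThreads=%u -> marking threads=%u\n",
           n, scale_parallel_threads(n));
  }
  return 0;
}
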
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -374,9 +374,9 @@
 protected:
   ConcurrentMarkThread* _cmThread;   // the thread doing the work
   G1CollectedHeap*      _g1h;        // the heap.
-  size_t                _parallel_marking_threads; // the number of marking
+  uint                  _parallel_marking_threads; // the number of marking
                                                    // threads we're using
-  size_t                _max_parallel_marking_threads; // max number of marking
+  uint                  _max_parallel_marking_threads; // max number of marking
                                                    // threads we'll ever use
   double                _sleep_factor; // how much we have to sleep, with
                                        // respect to the work we just did, to
@@ -412,8 +412,8 @@
                                     // last claimed region
 
   // marking tasks
-  size_t                  _max_task_num; // maximum task number
-  size_t                  _active_tasks; // task num currently active
+  uint                    _max_task_num; // maximum task number
+  uint                    _active_tasks; // task num currently active
   CMTask**                _tasks;        // task queue array (max_task_num len)
   CMTaskQueueSet*         _task_queues;  // task queue set
   ParallelTaskTerminator  _terminator;   // for termination
@@ -492,7 +492,7 @@
 
   // It should be called to indicate which phase we're in (concurrent
   // mark or remark) and how many threads are currently active.
-  void set_phase(size_t active_tasks, bool concurrent);
+  void set_phase(uint active_tasks, bool concurrent);
   // We do this after we're done with marking so that the marking data
   // structures are initialised to a sensible and predictable state.
   void set_non_marking_state();
@@ -505,8 +505,8 @@
   }
 
   // accessor methods
-  size_t parallel_marking_threads() { return _parallel_marking_threads; }
-  size_t max_parallel_marking_threads() { return _max_parallel_marking_threads;}
+  uint parallel_marking_threads() { return _parallel_marking_threads; }
+  uint max_parallel_marking_threads() { return _max_parallel_marking_threads;}
   double sleep_factor()             { return _sleep_factor; }
   double marking_task_overhead()    { return _marking_task_overhead;}
   double cleanup_sleep_factor()     { return _cleanup_sleep_factor; }
@@ -514,7 +514,7 @@
 
   HeapWord*               finger()        { return _finger;   }
   bool                    concurrent()    { return _concurrent; }
-  size_t                  active_tasks()  { return _active_tasks; }
+  uint                    active_tasks()  { return _active_tasks; }
   ParallelTaskTerminator* terminator()    { return &_terminator; }
 
   // It claims the next available region to be scanned by a marking
@@ -715,10 +715,10 @@
   // Returns the number of GC threads to be used in a concurrent
   // phase based on the number of GC threads being used in a STW
   // phase.
-  size_t scale_parallel_threads(size_t n_par_threads);
+  uint scale_parallel_threads(uint n_par_threads);
 
   // Calculates the number of GC threads to be used in a concurrent phase.
-  size_t calc_parallel_marking_threads();
+  uint calc_parallel_marking_threads();
 
   // The following three are interaction between CM and
   // G1CollectedHeap
@@ -873,7 +873,7 @@
     return _prevMarkBitMap->isMarked(addr);
   }
 
-  inline bool do_yield_check(int worker_i = 0);
+  inline bool do_yield_check(uint worker_i = 0);
   inline bool should_yield();
 
   // Called to abort the marking cycle after a Full GC takes place.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -1165,9 +1165,9 @@
       _g1(g1)
   { }
 
-  void work(int i) {
-    RebuildRSOutOfRegionClosure rebuild_rs(_g1, i);
-    _g1->heap_region_par_iterate_chunked(&rebuild_rs, i,
+  void work(uint worker_id) {
+    RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
+    _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
                                           _g1->workers()->active_workers(),
                                          HeapRegion::RebuildRSClaimValue);
   }
@@ -1374,7 +1374,7 @@
 
     // Rebuild remembered sets of all regions.
     if (G1CollectedHeap::use_parallel_gc_threads()) {
-      int n_workers =
+      uint n_workers =
         AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                        workers()->active_workers(),
                                        Threads::number_of_non_daemon_threads());
@@ -2519,11 +2519,11 @@
 
 void
 G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
-                                                 int worker,
-                                                 int no_of_par_workers,
+                                                 uint worker,
+                                                 uint no_of_par_workers,
                                                  jint claim_value) {
   const size_t regions = n_regions();
-  const size_t max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                              no_of_par_workers :
                              1);
   assert(UseDynamicNumberOfGCThreads ||
@@ -2739,7 +2739,7 @@
   result = g1_policy()->collection_set();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     size_t cs_size = g1_policy()->cset_region_length();
-    int active_workers = workers()->active_workers();
+    uint active_workers = workers()->active_workers();
     assert(UseDynamicNumberOfGCThreads ||
              active_workers == workers()->total_workers(),
              "Unless dynamic should use total workers");
@@ -3075,10 +3075,10 @@
     return _failures;
   }
 
-  void work(int worker_i) {
+  void work(uint worker_id) {
     HandleMark hm;
     VerifyRegionClosure blk(_allow_dirty, true, _vo);
-    _g1h->heap_region_par_iterate_chunked(&blk, worker_i,
+    _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
                                           _g1h->workers()->active_workers(),
                                           HeapRegion::ParVerifyClaimValue);
     if (blk.failures()) {
@@ -4725,7 +4725,7 @@
   G1CollectedHeap*       _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;
 
   Mutex _stats_lock;
   Mutex* stats_lock() { return &_stats_lock; }
@@ -4765,18 +4765,18 @@
     _n_workers = active_workers;
   }
 
-  void work(int i) {
-    if (i >= _n_workers) return;  // no work needed this round
+  void work(uint worker_id) {
+    if (worker_id >= _n_workers) return;  // no work needed this round
 
     double start_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
+    _g1h->g1_policy()->record_gc_worker_start_time(worker_id, start_time_ms);
 
     ResourceMark rm;
     HandleMark   hm;
 
     ReferenceProcessor*             rp = _g1h->ref_processor_stw();
 
-    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanThreadState            pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, rp);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, rp);
@@ -4808,7 +4808,7 @@
                                   scan_root_cl,
                                   &push_heap_rs_cl,
                                   scan_perm_cl,
-                                  i);
+                                  worker_id);
     pss.end_strong_roots();
 
     {
@@ -4817,8 +4817,8 @@
       evac.do_void();
       double elapsed_ms = (os::elapsedTime()-start)*1000.0;
       double term_ms = pss.term_time()*1000.0;
-      _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
-      _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
+      _g1h->g1_policy()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
+      _g1h->g1_policy()->record_termination(worker_id, term_ms, pss.term_attempts());
     }
     _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
@@ -4828,12 +4828,12 @@
 
     if (ParallelGCVerbose) {
       MutexLocker x(stats_lock());
-      pss.print_termination_stats(i);
+      pss.print_termination_stats(worker_id);
     }
 
     assert(pss.refs()->is_empty(), "should be empty");
     double end_time_ms = os::elapsedTime() * 1000.0;
-    _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
+    _g1h->g1_policy()->record_gc_worker_end_time(worker_id, end_time_ms);
   }
 };
 
@@ -5091,14 +5091,14 @@
     _terminator(terminator)
   {}
 
-  virtual void work(int i) {
+  virtual void work(uint worker_id) {
     // The reference processing task executed by a single worker.
     ResourceMark rm;
     HandleMark   hm;
 
     G1STWIsAliveClosure is_alive(_g1h);
 
-    G1ParScanThreadState pss(_g1h, i);
+    G1ParScanThreadState pss(_g1h, worker_id);
 
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
@@ -5130,7 +5130,7 @@
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
 
     // Call the reference processing task's work routine.
-    _proc_task.work(i, is_alive, keep_alive, drain_queue);
+    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
 
     // Note we cannot assert that the refs array is empty here as not all
     // of the processing tasks (specifically phase2 - pp2_work) execute
@@ -5165,8 +5165,8 @@
     _enq_task(enq_task)
   { }
 
-  virtual void work(int i) {
-    _enq_task.work(i);
+  virtual void work(uint worker_id) {
+    _enq_task.work(worker_id);
   }
 };
 
@@ -5195,7 +5195,7 @@
   G1CollectedHeap* _g1h;
   RefToScanQueueSet      *_queues;
   ParallelTaskTerminator _terminator;
-  int _n_workers;
+  uint _n_workers;
 
 public:
   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h,int workers, RefToScanQueueSet *task_queues) :
@@ -5206,11 +5206,11 @@
     _n_workers(workers)
   { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanThreadState            pss(_g1h, worker_id);
     G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss, NULL);
     G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
     G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss, NULL);
@@ -5246,17 +5246,17 @@
 
     ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
-    int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
-    int stride = MIN2(MAX2(_n_workers, 1), limit);
+    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
+    uint stride = MIN2(MAX2(_n_workers, 1U), limit);
 
     // limit is set using max_num_q() - which was set using ParallelGCThreads.
     // So this must be true - but assert just in case someone decides to
     // change the worker ids.
-    assert(0 <= i && i < limit, "sanity");
+    assert(0 <= worker_id && worker_id < limit, "sanity");
     assert(!rp->discovery_is_atomic(), "check this code");
 
     // Select discovered lists [i, i+stride, i+2*stride,...,limit)
-    for (int idx = i; idx < limit; idx += stride) {
+    for (uint idx = worker_id; idx < limit; idx += stride) {
       DiscoveredList& ref_list = rp->discovered_refs()[idx];
 
       DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
@@ -5310,7 +5310,7 @@
   // referents points to another object which is also referenced by an
   // object discovered by the STW ref processor.
 
-  int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
+  uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                         workers()->active_workers() : 1);
 
   assert(!G1CollectedHeap::use_parallel_gc_threads() ||
@@ -5416,7 +5416,7 @@
   } else {
     // Parallel reference enqueuing
 
-    int active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
+    uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
     assert(active_workers == workers()->active_workers(),
            "Need to reset active_workers");
     assert(rp->num_q() == active_workers, "sanity");
@@ -5445,7 +5445,7 @@
   concurrent_g1_refine()->set_use_cache(false);
   concurrent_g1_refine()->clear_hot_cache_claimed_index();
 
-  int n_workers;
+  uint n_workers;
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     n_workers =
       AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
@@ -5658,7 +5658,7 @@
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }
 
-  void work(int i) {
+  void work(uint worker_id) {
     HeapRegion* r;
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
@@ -6141,7 +6141,7 @@
   // Don't change the number of workers.  Use the value previously set
   // in the workgroup.
   assert(G1CollectedHeap::use_parallel_gc_threads(), "shouldn't be here otherwise");
-  int n_workers = workers()->active_workers();
+  uint n_workers = workers()->active_workers();
   assert(UseDynamicNumberOfGCThreads ||
            n_workers == workers()->total_workers(),
       "Otherwise should be using the total number of workers");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -995,7 +995,7 @@
   // Initialize weak reference processing.
   virtual void ref_processing_init();
 
-  void set_par_threads(int t) {
+  void set_par_threads(uint t) {
     SharedHeap::set_par_threads(t);
     // Done in SharedHeap but oddly there are
     // two _process_strong_tasks's in a G1CollectedHeap
@@ -1298,8 +1298,8 @@
   // chunk.)  For now requires that "doHeapRegion" always returns "false",
   // i.e., that a closure never attempt to abort a traversal.
   void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
-                                       int worker,
-                                       int no_of_par_workers,
+                                       uint worker,
+                                       uint no_of_par_workers,
                                        jint claim_value);
 
   // It resets all the region claim values to the default.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -2332,17 +2332,19 @@
     _g1(G1CollectedHeap::heap())
   {}
 
-  void work(int i) {
-    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted, _chunk_size, i);
+  void work(uint worker_id) {
+    ParKnownGarbageHRClosure parKnownGarbageCl(_hrSorted,
+                                               _chunk_size,
+                                               worker_id);
     // Back to zero for the claim value.
-    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, i,
+    _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
                                          _g1->workers()->active_workers(),
                                          HeapRegion::InitialClaimValue);
     jint regions_added = parKnownGarbageCl.marked_regions_added();
     _hrSorted->incNumMarkedHeapRegions(regions_added);
     if (G1PrintParCleanupStats) {
       gclog_or_tty->print_cr("     Thread %d called %d times, added %d regions to list.",
-                 i, parKnownGarbageCl.invokes(), regions_added);
+                 worker_id, parKnownGarbageCl.invokes(), regions_added);
     }
   }
 };
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -558,11 +558,11 @@
 }
 
 void G1RemSet::scrub_par(BitMap* region_bm, BitMap* card_bm,
-                                int worker_num, int claim_val) {
+                                uint worker_num, int claim_val) {
   ScrubRSClosure scrub_cl(region_bm, card_bm);
   _g1->heap_region_par_iterate_chunked(&scrub_cl,
                                        worker_num,
-                                       (int) n_workers(),
+                                       n_workers(),
                                        claim_val);
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -40,7 +40,7 @@
 protected:
   G1CollectedHeap* _g1;
   unsigned _conc_refine_cards;
-  size_t n_workers();
+  uint n_workers();
 
 protected:
   enum SomePrivateConstants {
@@ -122,7 +122,7 @@
   // parallel thread id of the current thread, and "claim_val" is the
   // value that should be used to claim heap regions.
   void scrub_par(BitMap* region_bm, BitMap* card_bm,
-                 int worker_num, int claim_val);
+                 uint worker_num, int claim_val);
 
   // Refine the card corresponding to "card_ptr".  If "sts" is non-NULL,
   // join and leave around parts that must be atomic wrt GC.  (NULL means
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -29,7 +29,7 @@
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "oops/oop.inline.hpp"
 
-inline size_t G1RemSet::n_workers() {
+inline uint G1RemSet::n_workers() {
   if (_g1->workers() != NULL) {
     return _g1->workers()->total_workers();
   } else {
--- a/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -56,14 +56,14 @@
                           lowest_non_clean_base_chunk_index,
                           lowest_non_clean_chunk_size);
 
-  int n_strides = n_threads * ParGCStridesPerThread;
+  uint n_strides = n_threads * ParGCStridesPerThread;
   SequentialSubTasksDone* pst = sp->par_seq_tasks();
   // Sets the condition for completion of the subtask (how many threads
   // need to finish in order to be done).
   pst->set_n_threads(n_threads);
   pst->set_n_tasks(n_strides);
 
-  int stride = 0;
+  uint stride = 0;
   while (!pst->is_task_claimed(/* reference */ stride)) {
     process_stride(sp, mr, stride, n_strides, cl, ct,
                    lowest_non_clean,
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -590,7 +590,7 @@
 // called after a task is started.  So "i" is based on
 // first-come-first-served.
 
-void ParNewGenTask::work(int i) {
+void ParNewGenTask::work(uint worker_id) {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   // Since this is being done in a separate thread, need new resource
   // and handle marks.
@@ -601,8 +601,8 @@
 
   Generation* old_gen = gch->next_gen(_gen);
 
-  ParScanThreadState& par_scan_state = _state_set->thread_state(i);
-  assert(_state_set->is_valid(i), "Should not have been called");
+  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
+  assert(_state_set->is_valid(worker_id), "Should not have been called");
 
   par_scan_state.set_young_old_boundary(_young_old_boundary);
 
@@ -755,7 +755,7 @@
                          ParScanThreadStateSet& state_set);
 
 private:
-  virtual void work(int i);
+  virtual void work(uint worker_id);
   virtual void set_for_termination(int active_workers) {
     _state_set.terminator()->reset_for_reuse(active_workers);
   }
@@ -781,13 +781,13 @@
 {
 }
 
-void ParNewRefProcTaskProxy::work(int i)
+void ParNewRefProcTaskProxy::work(uint worker_id)
 {
   ResourceMark rm;
   HandleMark hm;
-  ParScanThreadState& par_scan_state = _state_set.thread_state(i);
+  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
   par_scan_state.set_young_old_boundary(_young_old_boundary);
-  _task.work(i, par_scan_state.is_alive_closure(),
+  _task.work(worker_id, par_scan_state.is_alive_closure(),
              par_scan_state.keep_alive_closure(),
              par_scan_state.evacuate_followers_closure());
 }
@@ -802,9 +802,9 @@
       _task(task)
   { }
 
-  virtual void work(int i)
+  virtual void work(uint worker_id)
   {
-    _task.work(i);
+    _task.work(worker_id);
   }
 };
 
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -239,7 +239,7 @@
 
   HeapWord* young_old_boundary() { return _young_old_boundary; }
 
-  void work(int i);
+  void work(uint worker_id);
 
   // Reset the terminator in ParScanThreadStateSet for
   // "active_workers" threads.
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -69,7 +69,7 @@
   MemRegion _reserved;
   BarrierSet* _barrier_set;
   bool _is_gc_active;
-  int _n_par_threads;
+  uint _n_par_threads;
 
   unsigned int _total_collections;          // ... started
   unsigned int _total_full_collections;     // ... started
@@ -309,10 +309,10 @@
   GCCause::Cause gc_cause() { return _gc_cause; }
 
   // Number of threads currently working on GC tasks.
-  int n_par_threads() { return _n_par_threads; }
+  uint n_par_threads() { return _n_par_threads; }
 
   // May be overridden to set additional parallelism.
-  virtual void set_par_threads(int t) { _n_par_threads = t; };
+  virtual void set_par_threads(uint t) { _n_par_threads = t; };
 
   // Preload classes into the shared portion of the heap, and then dump
   // that data to a file so that it can be loaded directly by another
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -703,7 +703,7 @@
   return collector_policy()->satisfy_failed_allocation(size, is_tlab);
 }
 
-void GenCollectedHeap::set_par_threads(int t) {
+void GenCollectedHeap::set_par_threads(uint t) {
   SharedHeap::set_par_threads(t);
   _gen_process_strong_tasks->set_n_threads(t);
 }
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -419,8 +419,7 @@
   // asserted to be this type.
   static GenCollectedHeap* heap();
 
-  void set_par_threads(int t);
-
+  void set_par_threads(uint t);
 
   // Invoke the "do_oop" method of one of the closures "not_older_gens"
   // or "older_gens" on root locations for the generation at
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -88,9 +88,9 @@
 
 ReferenceProcessor::ReferenceProcessor(MemRegion span,
                                        bool      mt_processing,
-                                       int       mt_processing_degree,
+                                       uint      mt_processing_degree,
                                        bool      mt_discovery,
-                                       int       mt_discovery_degree,
+                                       uint      mt_discovery_degree,
                                        bool      atomic_discovery,
                                        BoolObjectClosure* is_alive_non_header,
                                        bool      discovered_list_needs_barrier)  :
@@ -105,7 +105,7 @@
   _span = span;
   _discovery_is_atomic = atomic_discovery;
   _discovery_is_mt     = mt_discovery;
-  _num_q               = MAX2(1, mt_processing_degree);
+  _num_q               = MAX2(1U, mt_processing_degree);
   _max_num_q           = MAX2(_num_q, mt_discovery_degree);
   _discovered_refs     = NEW_C_HEAP_ARRAY(DiscoveredList,
                                           _max_num_q * number_of_subclasses_of_ref());
@@ -118,7 +118,7 @@
   _discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
 
   // Initialize all entries to NULL
-  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     _discovered_refs[i].set_head(NULL);
     _discovered_refs[i].set_length(0);
   }
@@ -133,7 +133,7 @@
 #ifndef PRODUCT
 void ReferenceProcessor::verify_no_references_recorded() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     guarantee(_discovered_refs[i].is_empty(),
               "Found non-empty discovered list");
   }
@@ -141,7 +141,7 @@
 #endif
 
 void ReferenceProcessor::weak_oops_do(OopClosure* f) {
-  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (UseCompressedOops) {
       f->do_oop((narrowOop*)_discovered_refs[i].adr_head());
     } else {
@@ -437,7 +437,7 @@
     task_executor->execute(tsk);
   } else {
     // Serial code: call the parent class's implementation
-    for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+    for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
       enqueue_discovered_reflist(_discovered_refs[i], pending_list_addr);
       _discovered_refs[i].set_head(NULL);
       _discovered_refs[i].set_length(0);
@@ -696,7 +696,7 @@
 
 void ReferenceProcessor::abandon_partial_discovery() {
   // loop over the lists
-  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
     }
@@ -787,7 +787,7 @@
     gclog_or_tty->print_cr("\nBalance ref_lists ");
   }
 
-  for (int i = 0; i < _max_num_q; ++i) {
+  for (uint i = 0; i < _max_num_q; ++i) {
     total_refs += ref_lists[i].length();
     if (TraceReferenceGC && PrintGCDetails) {
       gclog_or_tty->print("%d ", ref_lists[i].length());
@@ -797,8 +797,8 @@
     gclog_or_tty->print_cr(" = %d", total_refs);
   }
   size_t avg_refs = total_refs / _num_q + 1;
-  int to_idx = 0;
-  for (int from_idx = 0; from_idx < _max_num_q; from_idx++) {
+  uint to_idx = 0;
+  for (uint from_idx = 0; from_idx < _max_num_q; from_idx++) {
     bool move_all = false;
     if (from_idx >= _num_q) {
       move_all = ref_lists[from_idx].length() > 0;
@@ -857,7 +857,7 @@
   }
 #ifdef ASSERT
   size_t balanced_total_refs = 0;
-  for (int i = 0; i < _max_num_q; ++i) {
+  for (uint i = 0; i < _max_num_q; ++i) {
     balanced_total_refs += ref_lists[i].length();
     if (TraceReferenceGC && PrintGCDetails) {
       gclog_or_tty->print("%d ", ref_lists[i].length());
@@ -903,7 +903,7 @@
   }
   if (PrintReferenceGC && PrintGCDetails) {
     size_t total = 0;
-    for (int i = 0; i < _max_num_q; ++i) {
+    for (uint i = 0; i < _max_num_q; ++i) {
       total += refs_lists[i].length();
     }
     gclog_or_tty->print(", %u refs", total);
@@ -919,7 +919,7 @@
       RefProcPhase1Task phase1(*this, refs_lists, policy, true /*marks_oops_alive*/);
       task_executor->execute(phase1);
     } else {
-      for (int i = 0; i < _max_num_q; i++) {
+      for (uint i = 0; i < _max_num_q; i++) {
         process_phase1(refs_lists[i], policy,
                        is_alive, keep_alive, complete_gc);
       }
@@ -935,7 +935,7 @@
     RefProcPhase2Task phase2(*this, refs_lists, !discovery_is_atomic() /*marks_oops_alive*/);
     task_executor->execute(phase2);
   } else {
-    for (int i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_q; i++) {
       process_phase2(refs_lists[i], is_alive, keep_alive, complete_gc);
     }
   }
@@ -946,7 +946,7 @@
     RefProcPhase3Task phase3(*this, refs_lists, clear_referent, true /*marks_oops_alive*/);
     task_executor->execute(phase3);
   } else {
-    for (int i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_q; i++) {
       process_phase3(refs_lists[i], clear_referent,
                      is_alive, keep_alive, complete_gc);
     }
@@ -955,7 +955,7 @@
 
 void ReferenceProcessor::clean_up_discovered_references() {
   // loop over the lists
-  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
       gclog_or_tty->print_cr(
         "\nScrubbing %s discovered list of Null referents",
@@ -1000,7 +1000,7 @@
 }
 
 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
-  int id = 0;
+  uint id = 0;
   // Determine the queue index to use for this object.
   if (_discovery_is_mt) {
     // During a multi-threaded discovery phase,
@@ -1282,7 +1282,7 @@
   {
     TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1295,7 +1295,7 @@
   {
     TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1308,7 +1308,7 @@
   {
     TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1321,7 +1321,7 @@
   {
     TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
               false, gclog_or_tty);
-    for (int i = 0; i < _max_num_q; i++) {
+    for (uint i = 0; i < _max_num_q; i++) {
       if (yield->should_return()) {
         return;
       }
@@ -1386,7 +1386,7 @@
   )
 }
 
-const char* ReferenceProcessor::list_name(int i) {
+const char* ReferenceProcessor::list_name(uint i) {
    assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
           "Out of bounds index");
 
@@ -1410,7 +1410,7 @@
 #ifndef PRODUCT
 void ReferenceProcessor::clear_discovered_references() {
   guarantee(!_discovering_refs, "Discovering refs?");
-  for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
+  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
     clear_discovered_references(_discovered_refs[i]);
   }
 }
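Why the 1U matters in the constructor hunk above: HotSpot's MAX2 is a single-type template (template<class T> T MAX2(T, T) in globalDefinitions.hpp), so a plain int literal mixed with the now-unsigned mt_processing_degree would fail template argument deduction. A minimal sketch of the rule; the snippet below is illustrative, not part of the patch:

    // Single-type max template, mirroring HotSpot's globalDefinitions.hpp:
    template <class T> T MAX2(T a, T b) { return (a > b) ? a : b; }

    uint mt_processing_degree = 4;
    // MAX2(1, mt_processing_degree);   // ill-formed: T deduced as both
    //                                  // int and uint
    uint num_q = MAX2(1U, mt_processing_degree);   // 1U makes both args uint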
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -231,7 +231,7 @@
   bool        _enqueuing_is_done;       // true if all weak references enqueued
   bool        _processing_is_mt;        // true during phases when
                                         // reference processing is MT.
-  int         _next_id;                 // round-robin mod _num_q counter in
+  uint        _next_id;                 // round-robin mod _num_q counter in
                                         // support of work distribution
 
   // For collectors that do not keep GC liveness information
@@ -252,9 +252,9 @@
   // The discovered ref lists themselves
 
   // The active MT'ness degree of the queues below
-  int             _num_q;
+  uint            _num_q;
   // The maximum MT'ness degree of the queues below
-  int             _max_num_q;
+  uint            _max_num_q;
 
   // Master array of discovered oops
   DiscoveredList* _discovered_refs;
@@ -268,9 +268,9 @@
  public:
   static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
 
-  int num_q()                              { return _num_q; }
-  int max_num_q()                          { return _max_num_q; }
-  void set_active_mt_degree(int v)         { _num_q = v; }
+  uint num_q()                             { return _num_q; }
+  uint max_num_q()                         { return _max_num_q; }
+  void set_active_mt_degree(uint v)        { _num_q = v; }
 
   DiscoveredList* discovered_refs()        { return _discovered_refs; }
 
@@ -368,7 +368,7 @@
 
   // Returns the name of the discovered reference list
   // occupying the i / _num_q slot.
-  const char* list_name(int i);
+  const char* list_name(uint i);
 
   void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
 
@@ -388,8 +388,8 @@
                                    YieldClosure*      yield);
 
   // round-robin mod _num_q (note: _not_ mod _max_num_q)
-  int next_id() {
-    int id = _next_id;
+  uint next_id() {
+    uint id = _next_id;
     if (++_next_id == _num_q) {
       _next_id = 0;
     }
@@ -434,8 +434,8 @@
 
   // Default parameters give you a vanilla reference processor.
   ReferenceProcessor(MemRegion span,
-                     bool mt_processing = false, int mt_processing_degree = 1,
-                     bool mt_discovery  = false, int mt_discovery_degree  = 1,
+                     bool mt_processing = false, uint mt_processing_degree = 1,
+                     bool mt_discovery  = false, uint mt_discovery_degree  = 1,
                      bool atomic_discovery = true,
                      BoolObjectClosure* is_alive_non_header = NULL,
                      bool discovered_list_needs_barrier = false);
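The next_id() accessor above is the reason _next_id becomes uint: it is a pure round-robin counter whose value always stays in [0, _num_q), so a signed type adds nothing. A standalone sketch of the same scheme; the class framing is illustrative, not HotSpot code:

    // Round-robin hand-out of queue indices, mirroring
    // ReferenceProcessor::next_id() above:
    class RoundRobinIds {
      uint _next;    // next id to hand out
      uint _num_q;   // number of queues; ids stay in [0, _num_q)
    public:
      RoundRobinIds(uint num_q) : _next(0), _num_q(num_q) {}
      uint next_id() {
        uint id = _next;
        if (++_next == _num_q) {
          _next = 0;   // wrap around
        }
        return id;
      }
    };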
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -94,7 +94,7 @@
              && _thread_holds_heap_lock_for_gc);
 }
 
-void SharedHeap::set_par_threads(int t) {
+void SharedHeap::set_par_threads(uint t) {
   assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
   _n_par_threads = t;
   _process_strong_tasks->set_n_threads(t);
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -287,7 +287,7 @@
 
   // Sets the number of parallel threads that will be doing tasks
   // (such as process strong roots) subsequently.
-  virtual void set_par_threads(int t);
+  virtual void set_par_threads(uint t);
 
   int n_termination();
   void set_n_termination(int t);
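One subtlety of widening a virtual parameter such as set_par_threads: a subclass that still declares the old int signature silently stops overriding and merely hides the base method, so the new signature has to be applied to every overrider at once. A sketch of the hazard; the class names are illustrative, and pre-C++11 code has no override keyword to catch this:

    class BaseHeap {
    public:
      virtual void set_par_threads(uint t) { _n_par_threads = t; }
    protected:
      uint _n_par_threads;
    };

    class DerivedHeap : public BaseHeap {
    public:
      // virtual void set_par_threads(int t);   // would HIDE, not override,
      //                                        // the uint version above
      virtual void set_par_threads(uint t) {    // matching signature overrides
        BaseHeap::set_par_threads(t);
      }
    };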
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -1551,7 +1551,7 @@
   product(uintx, ParGCDesiredObjsFromOverflowList, 20,                      \
           "The desired number of objects to claim from the overflow list")  \
                                                                             \
-  diagnostic(intx, ParGCStridesPerThread, 2,                                \
+  diagnostic(uintx, ParGCStridesPerThread, 2,                               \
           "The number of strides per worker thread that we divide up the "  \
           "card table scanning work into")                                  \
                                                                             \
--- a/hotspot/src/share/vm/utilities/workgroup.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/utilities/workgroup.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -53,14 +53,14 @@
 }
 
 WorkGang::WorkGang(const char* name,
-                   int         workers,
+                   uint        workers,
                    bool        are_GC_task_threads,
                    bool        are_ConcurrentGC_threads) :
   AbstractWorkGang(name, are_GC_task_threads, are_ConcurrentGC_threads) {
   _total_workers = workers;
 }
 
-GangWorker* WorkGang::allocate_worker(int which) {
+GangWorker* WorkGang::allocate_worker(uint which) {
   GangWorker* new_worker = new GangWorker(this, which);
   return new_worker;
 }
@@ -88,7 +88,7 @@
   } else {
     worker_type = os::pgc_thread;
   }
-  for (int worker = 0; worker < total_workers(); worker += 1) {
+  for (uint worker = 0; worker < total_workers(); worker += 1) {
     GangWorker* new_worker = allocate_worker(worker);
     assert(new_worker != NULL, "Failed to allocate GangWorker");
     _gang_workers[worker] = new_worker;
@@ -108,14 +108,14 @@
     tty->print_cr("Destructing work gang %s", name());
   }
   stop();   // stop all the workers
-  for (int worker = 0; worker < total_workers(); worker += 1) {
+  for (uint worker = 0; worker < total_workers(); worker += 1) {
     delete gang_worker(worker);
   }
   delete gang_workers();
   delete monitor();
 }
 
-GangWorker* AbstractWorkGang::gang_worker(int i) const {
+GangWorker* AbstractWorkGang::gang_worker(uint i) const {
   // Array index bounds checking.
   GangWorker* result = NULL;
   assert(gang_workers() != NULL, "No workers for indexing");
@@ -148,7 +148,7 @@
   // Tell the workers to get to work.
   monitor()->notify_all();
   // Wait for them to be finished
-  while (finished_workers() < (int) no_of_parallel_workers) {
+  while (finished_workers() < no_of_parallel_workers) {
     if (TraceWorkGang) {
       tty->print_cr("Waiting in work gang %s: %d/%d finished sequence %d",
                     name(), finished_workers(), no_of_parallel_workers,
@@ -377,12 +377,12 @@
     _n_workers(0), _n_completed(0), _should_reset(false) {
 }
 
-WorkGangBarrierSync::WorkGangBarrierSync(int n_workers, const char* name)
+WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
   : _monitor(Mutex::safepoint, name, true),
     _n_workers(n_workers), _n_completed(0), _should_reset(false) {
 }
 
-void WorkGangBarrierSync::set_n_workers(int n_workers) {
+void WorkGangBarrierSync::set_n_workers(uint n_workers) {
   _n_workers   = n_workers;
   _n_completed = 0;
   _should_reset = false;
@@ -419,9 +419,9 @@
 
 // SubTasksDone functions.
 
-SubTasksDone::SubTasksDone(int n) :
+SubTasksDone::SubTasksDone(uint n) :
   _n_tasks(n), _n_threads(1), _tasks(NULL) {
-  _tasks = NEW_C_HEAP_ARRAY(jint, n);
+  _tasks = NEW_C_HEAP_ARRAY(uint, n);
   guarantee(_tasks != NULL, "alloc failure");
   clear();
 }
@@ -430,14 +430,14 @@
   return _tasks != NULL;
 }
 
-void SubTasksDone::set_n_threads(int t) {
+void SubTasksDone::set_n_threads(uint t) {
   assert(_claimed == 0 || _threads_completed == _n_threads,
          "should not be called while tasks are being processed!");
   _n_threads = (t == 0 ? 1 : t);
 }
 
 void SubTasksDone::clear() {
-  for (int i = 0; i < _n_tasks; i++) {
+  for (uint i = 0; i < _n_tasks; i++) {
     _tasks[i] = 0;
   }
   _threads_completed = 0;
@@ -446,9 +446,9 @@
 #endif
 }
 
-bool SubTasksDone::is_task_claimed(int t) {
+bool SubTasksDone::is_task_claimed(uint t) {
   assert(0 <= t && t < _n_tasks, "bad task id.");
-  jint old = _tasks[t];
+  uint old = _tasks[t];
   if (old == 0) {
     old = Atomic::cmpxchg(1, &_tasks[t], 0);
   }
@@ -457,7 +457,7 @@
 #ifdef ASSERT
   if (!res) {
     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
-    Atomic::inc(&_claimed);
+    Atomic::inc((volatile jint*) &_claimed);
   }
 #endif
   return res;
@@ -471,7 +471,7 @@
     observed = Atomic::cmpxchg(old+1, &_threads_completed, old);
   } while (observed != old);
   // If this was the last thread checking in, clear the tasks.
-  if (observed+1 == _n_threads) clear();
+  if (observed+1 == (jint)_n_threads) clear();
 }
 
 
@@ -490,12 +490,12 @@
   return _n_threads > 0;
 }
 
-bool SequentialSubTasksDone::is_task_claimed(int& t) {
-  jint* n_claimed_ptr = &_n_claimed;
+bool SequentialSubTasksDone::is_task_claimed(uint& t) {
+  uint* n_claimed_ptr = &_n_claimed;
   t = *n_claimed_ptr;
   while (t < _n_tasks) {
     jint res = Atomic::cmpxchg(t+1, n_claimed_ptr, t);
-    if (res == t) {
+    if (res == (jint)t) {
       return false;
     }
     t = *n_claimed_ptr;
@@ -504,10 +504,10 @@
 }
 
 bool SequentialSubTasksDone::all_tasks_completed() {
-  jint* n_completed_ptr = &_n_completed;
-  jint  complete        = *n_completed_ptr;
+  uint* n_completed_ptr = &_n_completed;
+  uint  complete        = *n_completed_ptr;
   while (true) {
-    jint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete);
+    uint res = Atomic::cmpxchg(complete+1, n_completed_ptr, complete);
     if (res == complete) {
       break;
     }
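The casts in the SubTasksDone hunks are the cost of keeping the counters unsigned while the Atomic primitives are built around jint: judging from the casts above, Atomic::inc has no unsigned overload, and the cmpxchg result is signed and must be cast back before comparing with a uint field. A condensed sketch of the check-in pattern; the standalone framing is illustrative, the loop body mirrors the function above:

    volatile uint _threads_completed = 0;   // unsigned counter
    uint _n_threads = 4;

    void thread_checks_in() {
      jint observed = _threads_completed;
      jint old;
      do {
        old = observed;
        // publish old+1 only if the counter still equals old:
        observed = Atomic::cmpxchg(old + 1, &_threads_completed, old);
      } while (observed != old);
      if (observed + 1 == (jint)_n_threads) {
        // last thread through: safe to reset shared state
      }
    }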
--- a/hotspot/src/share/vm/utilities/workgroup.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/utilities/workgroup.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -68,7 +68,7 @@
 public:
   // The abstract work method.
   // The argument tells you which member of the gang you are.
-  virtual void work(int i) = 0;
+  virtual void work(uint worker_id) = 0;
 
   // This method configures the task for proper termination.
   // Some tasks do not have any requirements on termination
@@ -149,7 +149,7 @@
   // and notifies of changes in it.
   Monitor*  _monitor;
   // The count of the number of workers in the gang.
-  int _total_workers;
+  uint _total_workers;
   // Whether the workers should terminate.
   bool _terminate;
   // The array of worker threads for this gang.
@@ -160,18 +160,18 @@
   // A sequence number for the current task.
   int _sequence_number;
   // The number of started workers.
-  int _started_workers;
+  uint _started_workers;
   // The number of finished workers.
-  int _finished_workers;
+  uint _finished_workers;
 public:
   // Accessors for fields
   Monitor* monitor() const {
     return _monitor;
   }
-  int total_workers() const {
+  uint total_workers() const {
     return _total_workers;
   }
-  virtual int active_workers() const {
+  virtual uint active_workers() const {
     return _total_workers;
   }
   bool terminate() const {
@@ -186,10 +186,10 @@
   int sequence_number() const {
     return _sequence_number;
   }
-  int started_workers() const {
+  uint started_workers() const {
     return _started_workers;
   }
-  int finished_workers() const {
+  uint finished_workers() const {
     return _finished_workers;
   }
   bool are_GC_task_threads() const {
@@ -203,7 +203,7 @@
     return (task() == NULL);
   }
   // Return the Ith gang worker.
-  GangWorker* gang_worker(int i) const;
+  GangWorker* gang_worker(uint i) const;
 
   void threads_do(ThreadClosure* tc) const;
 
@@ -255,13 +255,13 @@
 class WorkGang: public AbstractWorkGang {
 public:
   // Constructor
-  WorkGang(const char* name, int workers,
+  WorkGang(const char* name, uint workers,
            bool are_GC_task_threads, bool are_ConcurrentGC_threads);
   // Run a task, returns when the task is done (or terminated).
   virtual void run_task(AbstractGangTask* task);
   void run_task(AbstractGangTask* task, uint no_of_parallel_workers);
   // Allocate a worker and return a pointer to it.
-  virtual GangWorker* allocate_worker(int which);
+  virtual GangWorker* allocate_worker(uint which);
   // Initialize workers in the gang.  Return true if initialization
   // succeeded. The type of the worker can be overridden in a derived
   // class with the appropriate implementation of allocate_worker().
@@ -323,25 +323,25 @@
   // determine completion.
 
  protected:
-  int _active_workers;
+  uint _active_workers;
  public:
   // Constructor and destructor.
   // Initialize active_workers to a minimum value.  Setting it to
   // the parameter "workers" will initialize it to a maximum
   // value which is not desirable.
-  FlexibleWorkGang(const char* name, int workers,
+  FlexibleWorkGang(const char* name, uint workers,
                    bool are_GC_task_threads,
                    bool  are_ConcurrentGC_threads) :
     WorkGang(name, workers, are_GC_task_threads, are_ConcurrentGC_threads),
-    _active_workers(UseDynamicNumberOfGCThreads ? 1 : ParallelGCThreads) {};
+    _active_workers(UseDynamicNumberOfGCThreads ? 1U : ParallelGCThreads) {}
   // Accessors for fields
-  virtual int active_workers() const { return _active_workers; }
-  void set_active_workers(int v) {
+  virtual uint active_workers() const { return _active_workers; }
+  void set_active_workers(uint v) {
     assert(v <= _total_workers,
            "Trying to set more workers active than there are");
     _active_workers = MIN2(v, _total_workers);
     assert(v != 0, "Trying to set active workers to 0");
-    _active_workers = MAX2(1, _active_workers);
+    _active_workers = MAX2(1U, _active_workers);
     assert(UseDynamicNumberOfGCThreads || _active_workers == _total_workers,
            "Unless dynamic should use total workers");
   }
@@ -370,13 +370,13 @@
 class WorkGangBarrierSync : public StackObj {
 protected:
   Monitor _monitor;
-  int     _n_workers;
-  int     _n_completed;
+  uint    _n_workers;
+  uint    _n_completed;
   bool    _should_reset;
 
   Monitor* monitor()        { return &_monitor; }
-  int      n_workers()      { return _n_workers; }
-  int      n_completed()    { return _n_completed; }
+  uint     n_workers()      { return _n_workers; }
+  uint     n_completed()    { return _n_completed; }
   bool     should_reset()   { return _should_reset; }
 
   void     zero_completed() { _n_completed = 0; }
@@ -386,11 +386,11 @@
 
 public:
   WorkGangBarrierSync();
-  WorkGangBarrierSync(int n_workers, const char* name);
+  WorkGangBarrierSync(uint n_workers, const char* name);
 
   // Set the number of workers that will use the barrier.
   // Must be called before any of the workers start running.
-  void set_n_workers(int n_workers);
+  void set_n_workers(uint n_workers);
 
   // Enter the barrier. A worker that enters the barrier will
   // not be allowed to leave until all other threads have
@@ -402,18 +402,18 @@
 // subtasks will be identified by integer indices, usually elements of an
 // enumeration type.
 
-class SubTasksDone: public CHeapObj {
-  jint* _tasks;
-  int _n_tasks;
+class SubTasksDone : public CHeapObj {
+  uint* _tasks;
+  uint _n_tasks;
   // _n_threads is used to determine when a sub task is done.
   // It does not control how many threads will execute the subtask
   // but must be initialized to the number that do execute the task
   // in order to correctly decide when the subtask is done (all the
   // threads working on the task have finished).
-  int _n_threads;
-  jint _threads_completed;
+  uint _n_threads;
+  uint _threads_completed;
 #ifdef ASSERT
-  volatile jint _claimed;
+  volatile uint _claimed;
 #endif
 
   // Set all tasks to unclaimed.
@@ -423,19 +423,19 @@
   // Initializes "this" to a state in which there are "n" tasks to be
   // processed, none of which are originally claimed.  The number of
   // threads doing the tasks is initialized to 1.
-  SubTasksDone(int n);
+  SubTasksDone(uint n);
 
   // True iff the object is in a valid state.
   bool valid();
 
   // Get/set the number of parallel threads doing the tasks to "t".  Can only
   // be called before tasks start or after they are complete.
-  int n_threads() { return _n_threads; }
-  void set_n_threads(int t);
+  uint n_threads() { return _n_threads; }
+  void set_n_threads(uint t);
 
   // Returns "false" if the task "t" is unclaimed, and ensures that task is
   // claimed.  The task "t" is required to be within the range of "this".
-  bool is_task_claimed(int t);
+  bool is_task_claimed(uint t);
 
   // The calling thread asserts that it has attempted to claim all the
   // tasks that it will try to claim.  Every thread in the parallel task
@@ -456,12 +456,12 @@
 
 class SequentialSubTasksDone : public StackObj {
 protected:
-  jint _n_tasks;     // Total number of tasks available.
-  jint _n_claimed;   // Number of tasks claimed.
+  uint _n_tasks;     // Total number of tasks available.
+  uint _n_claimed;   // Number of tasks claimed.
   // _n_threads is used to determine when a sub task is done.
   // See comments on SubTasksDone::_n_threads
-  jint _n_threads;   // Total number of parallel threads.
-  jint _n_completed; // Number of completed threads.
+  uint _n_threads;   // Total number of parallel threads.
+  uint _n_completed; // Number of completed threads.
 
   void clear();
 
@@ -475,26 +475,26 @@
   bool valid();
 
   // number of tasks
-  jint n_tasks() const { return _n_tasks; }
+  uint n_tasks() const { return _n_tasks; }
 
   // Get/set the number of parallel threads doing the tasks to t.
   // Should be called before the task starts but it is safe
   // to call this once a task is running provided that all
   // threads agree on the number of threads.
-  int n_threads() { return _n_threads; }
-  void set_n_threads(int t) { _n_threads = t; }
+  uint n_threads() { return _n_threads; }
+  void set_n_threads(uint t) { _n_threads = t; }
 
   // Set the number of tasks to be claimed to t. As above,
   // should be called before the tasks start but it is safe
   // to call this once a task is running provided all threads
   // agree on the number of tasks.
-  void set_n_tasks(int t) { _n_tasks = t; }
+  void set_n_tasks(uint t) { _n_tasks = t; }
 
   // Returns false if the next task in the sequence is unclaimed,
   // and ensures that it is claimed. Will set t to be the index
   // of the claimed task in the sequence. Will return true if
   // the task cannot be claimed and there are none left to claim.
-  bool is_task_claimed(int& t);
+  bool is_task_claimed(uint& t);
 
   // The calling thread asserts that it has attempted to claim
   // all the tasks it possibly can in the sequence. Every thread
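For orientation, this is how the two claim classes above are typically driven from a gang task's work(uint worker_id). The task ids and surrounding names (active_workers, n_strides) are illustrative, not part of this changeset:

    // SubTasksDone: a fixed set of named one-shot subtasks.
    enum RootTasks { Task_universe = 0, Task_strings = 1, Num_tasks = 2 };
    SubTasksDone subtasks(Num_tasks);
    subtasks.set_n_threads(active_workers);   // threads that will participate

    // Inside each worker's work(uint worker_id):
    if (!subtasks.is_task_claimed(Task_universe)) {
      // exactly one worker executes this subtask
    }
    if (!subtasks.is_task_claimed(Task_strings)) {
      // exactly one worker executes this subtask
    }
    subtasks.all_tasks_completed();   // last thread in resets the claim bits

    // SequentialSubTasksDone: a dense range [0, n_tasks) handed out in order.
    SequentialSubTasksDone seq;
    seq.set_n_tasks(n_strides);
    uint t;
    while (!seq.is_task_claimed(t)) {
      // process unit t; each index goes to exactly one worker
    }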
--- a/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/utilities/yieldingWorkgroup.cpp	Wed Dec 14 13:34:57 2011 -0800
@@ -33,11 +33,11 @@
 class WorkData;
 
 YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
-  const char* name, int workers, bool are_GC_task_threads) :
+  const char* name, uint workers, bool are_GC_task_threads) :
   FlexibleWorkGang(name, workers, are_GC_task_threads, false),
     _yielded_workers(0) {}
 
-GangWorker* YieldingFlexibleWorkGang::allocate_worker(int which) {
+GangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
   YieldingFlexibleGangWorker* new_member =
       new YieldingFlexibleGangWorker(this, which);
   return (YieldingFlexibleGangWorker*) new_member;
@@ -120,7 +120,7 @@
   new_task->set_gang(this);  // Establish 2-way binding to support yielding
   _sequence_number++;
 
-  int requested_size = new_task->requested_size();
+  uint requested_size = new_task->requested_size();
   assert(requested_size >= 0, "Should be non-negative");
   if (requested_size != 0) {
     _active_workers = MIN2(requested_size, total_workers());
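The hunk above also shows the sizing rule for a yielding task: a nonzero requested_size is clamped to the gang's capacity, and the neighboring assert requested_size >= 0 is now vacuously true for a uint. A standalone sketch; treating zero as "use the whole gang" is an assumption, since that branch falls outside this hunk:

    // Clamp a task's requested worker count to the gang's capacity:
    uint sized_active_workers(uint requested_size, uint total_workers) {
      if (requested_size != 0) {
        return MIN2(requested_size, total_workers);   // never exceed the gang
      }
      return total_workers;   // assumed default: run with the full gang
    }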
--- a/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp	Wed Dec 21 07:53:53 2011 -0500
+++ b/hotspot/src/share/vm/utilities/yieldingWorkgroup.hpp	Wed Dec 14 13:34:57 2011 -0800
@@ -71,7 +71,7 @@
 
   // The abstract work method.
   // The argument tells you which member of the gang you are.
-  virtual void work(int i) = 0;
+  virtual void work(uint worker_id) = 0;
 
   int requested_size() const { return _requested_size; }
   int actual_size()    const { return _actual_size; }
@@ -128,7 +128,7 @@
 public:
   // The abstract work method.
   // The argument tells you which member of the gang you are.
-  virtual void work(int i) = 0;
+  virtual void work(uint worker_id) = 0;
 
   // Subclasses should call the parent's yield() method
   // after having done any work specific to the subclass.
@@ -159,7 +159,7 @@
   // Here's the public interface to this class.
 public:
   // Constructor and destructor.
-  YieldingFlexibleWorkGang(const char* name, int workers,
+  YieldingFlexibleWorkGang(const char* name, uint workers,
                            bool are_GC_task_threads);
 
   YieldingFlexibleGangTask* yielding_task() const {
@@ -168,7 +168,7 @@
     return (YieldingFlexibleGangTask*)task();
   }
   // Allocate a worker and return a pointer to it.
-  GangWorker* allocate_worker(int which);
+  GangWorker* allocate_worker(uint which);
 
   // Run a task; returns when the task is done, or the workers yield,
   // or the task is aborted, or the work gang is terminated via stop().
@@ -199,12 +199,12 @@
   void abort();
 
 private:
-  int _yielded_workers;
+  uint _yielded_workers;
   void wait_for_gang();
 
 public:
   // Accessors for fields
-  int yielded_workers() const {
+  uint yielded_workers() const {
     return _yielded_workers;
   }
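Finally, one general pitfall when migrating loop indices from int to uint, relevant to the many loops converted in this changeset: forward loops port directly, but countdown loops must not test i >= 0, which is always true for an unsigned type. A sketch:

    uint n = 10;

    for (uint i = 0; i < n; i++) { }   // forward loops port directly

    // for (uint i = n - 1; i >= 0; i--) { }   // never terminates: i wraps
    //                                         // to UINT_MAX below zero

    for (uint i = n; i-- > 0; ) { }    // safe unsigned countdown idiom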