7157073: G1: type change size_t -> uint for region counts / indexes
author tonyp
Wed, 18 Apr 2012 07:21:15 -0400
changeset 12381:1438e0fbfa27
parent 12380:48f69987dbca
child 12382:6aaecb1cbfe1
7157073: G1: type change size_t -> uint for region counts / indexes
Summary: Change the type of fields / variables / etc. that represent region counts and indices from size_t to uint.
Reviewed-by: iveresov, brutisso, jmasa, jwilhelm
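
The change follows one mechanical pattern throughout the hunks below: values that count or index heap regions become uint, byte quantities stay size_t, and wherever a region count feeds into byte arithmetic the widening is made explicit (for example "(size_t) young_regions * HeapRegion::GrainBytes / K" in g1CollectedHeap.cpp). A minimal standalone sketch of that convention, using hypothetical names (region_count_t, kRegionBytes) rather than the real HotSpot declarations:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-ins for the fields touched by this changeset:
// region counts/indexes are plain unsigned int (HotSpot's uint),
// byte sizes stay size_t.
typedef unsigned int region_count_t;
static const size_t kRegionBytes = 1024 * 1024;   // assumed 1M region size

// Widen the count before multiplying so the byte total is computed
// in size_t even for heaps with many regions.
static size_t regions_to_bytes(region_count_t regions) {
  return (size_t) regions * kRegionBytes;
}

int main() {
  region_count_t young_regions = 5000;
  // Region counts now print with %u instead of SIZE_FORMAT.
  printf("%u young regions (%zu K)\n",
         young_regions, regions_to_bytes(young_regions) / 1024);
  return 0;
}
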
hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java
hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java
hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp
hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp
hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp
hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp
hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp
hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp
hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
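
A second, smaller consequence visible in many of the hunks is that format strings change together with the types: once a field such as _cleanup_list.length() becomes uint, it is printed with %u rather than SIZE_FORMAT, because a printf-style varargs call must match the argument width exactly. A short illustration of that coupling; SIZE_FORMAT is defined as "%zu" here only so the sketch compiles on its own (the real HotSpot macro is platform-dependent):

#include <cstddef>
#include <cstdio>

// Stand-in for HotSpot's SIZE_FORMAT macro; "%zu" is used here just to
// keep this sketch self-contained.
#define SIZE_FORMAT "%zu"

int main() {
  size_t       old_length = 42;   // before the change: a size_t length field
  unsigned int new_length = 42;   // after the change: the same field as uint

  // The specifier has to track the type, otherwise the call reads the
  // wrong number of bytes off the varargs list on LP64 platforms.
  printf("cleanup list has " SIZE_FORMAT " entries\n", old_length);
  printf("cleanup list has %u entries\n", new_length);
  return 0;
}
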
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,7 @@
 public class HeapRegionSeq extends VMObject {
     // HeapRegion** _regions;
     static private AddressField regionsField;
-    // size_t _length;
+    // uint _length;
     static private CIntegerField lengthField;
 
     static {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSetBase.java	Wed Apr 18 07:21:15 2012 -0400
@@ -40,9 +40,9 @@
 // Mirror class for HeapRegionSetBase. Represents a group of regions.
 
 public class HeapRegionSetBase extends VMObject {
-    // size_t _length;
+    // uint _length;
     static private CIntegerField lengthField;
-    // size_t _region_num;
+    // uint _region_num;
     static private CIntegerField regionNumField;
     // size_t _total_used_bytes;
     static private CIntegerField totalUsedBytesField;
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -273,7 +273,7 @@
   assert(verify(), "CSet chooser verification");
 }
 
-size_t CollectionSetChooser::calcMinOldCSetLength() {
+uint CollectionSetChooser::calcMinOldCSetLength() {
   // The min old CSet region bound is based on the maximum desired
   // number of mixed GCs after a cycle. I.e., even if some old regions
   // look expensive, we should add them to the CSet anyway to make
@@ -291,10 +291,10 @@
   if (result * gc_num < region_num) {
     result += 1;
   }
-  return result;
+  return (uint) result;
 }
 
-size_t CollectionSetChooser::calcMaxOldCSetLength() {
+uint CollectionSetChooser::calcMaxOldCSetLength() {
   // The max old CSet region bound is based on the threshold expressed
   // as a percentage of the heap size. I.e., it should bound the
   // number of old regions added to the CSet irrespective of how many
@@ -308,7 +308,7 @@
   if (100 * result < region_num * perc) {
     result += 1;
   }
-  return result;
+  return (uint) result;
 }
 
 void CollectionSetChooser::addMarkedHeapRegion(HeapRegion* hr) {
@@ -321,10 +321,10 @@
   hr->calc_gc_efficiency();
 }
 
-void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(size_t n_regions,
-                                                             size_t chunkSize) {
+void CollectionSetChooser::prepareForAddMarkedHeapRegionsPar(uint n_regions,
+                                                             uint chunkSize) {
   _first_par_unreserved_idx = 0;
-  int n_threads = ParallelGCThreads;
+  uint n_threads = (uint) ParallelGCThreads;
   if (UseDynamicNumberOfGCThreads) {
     assert(G1CollectedHeap::heap()->workers()->active_workers() > 0,
       "Should have been set earlier");
@@ -335,12 +335,11 @@
     n_threads = MAX2(G1CollectedHeap::heap()->workers()->active_workers(),
                      1U);
   }
-  size_t max_waste = n_threads * chunkSize;
+  uint max_waste = n_threads * chunkSize;
   // it should be aligned with respect to chunkSize
-  size_t aligned_n_regions =
-                     (n_regions + (chunkSize - 1)) / chunkSize * chunkSize;
-  assert( aligned_n_regions % chunkSize == 0, "should be aligned" );
-  _markedRegions.at_put_grow((int)(aligned_n_regions + max_waste - 1), NULL);
+  uint aligned_n_regions = (n_regions + chunkSize - 1) / chunkSize * chunkSize;
+  assert(aligned_n_regions % chunkSize == 0, "should be aligned");
+  _markedRegions.at_put_grow((int) (aligned_n_regions + max_waste - 1), NULL);
 }
 
 jint CollectionSetChooser::getParMarkedHeapRegionChunk(jint n_regions) {
--- a/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -150,18 +150,18 @@
 
   // Calculate the minimum number of old regions we'll add to the CSet
   // during a mixed GC.
-  size_t calcMinOldCSetLength();
+  uint calcMinOldCSetLength();
 
   // Calculate the maximum number of old regions we'll add to the CSet
   // during a mixed GC.
-  size_t calcMaxOldCSetLength();
+  uint calcMaxOldCSetLength();
 
   // Serial version.
   void addMarkedHeapRegion(HeapRegion *hr);
 
   // Must be called before calls to getParMarkedHeapRegionChunk.
   // "n_regions" is the number of regions, "chunkSize" the chunk size.
-  void prepareForAddMarkedHeapRegionsPar(size_t n_regions, size_t chunkSize);
+  void prepareForAddMarkedHeapRegionsPar(uint n_regions, uint chunkSize);
   // Returns the first index in a contiguous chunk of "n_regions" indexes
   // that the calling thread has reserved.  These must be set by the
   // calling thread using "setMarkedHeapRegion" (to NULL if necessary).
@@ -176,7 +176,7 @@
   void clearMarkedHeapRegions();
 
   // Return the number of candidate regions that remain to be collected.
-  size_t remainingRegions() { return _length - _curr_index; }
+  uint remainingRegions() { return (uint) (_length - _curr_index); }
 
   // Determine whether the CSet chooser has more candidate regions or not.
   bool isEmpty() { return remainingRegions() == 0; }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -403,8 +403,7 @@
   return MAX2((n_par_threads + 2) / 4, 1U);
 }
 
-ConcurrentMark::ConcurrentMark(ReservedSpace rs,
-                               int max_regions) :
+ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
   _markBitMap1(rs, MinObjAlignment - 1),
   _markBitMap2(rs, MinObjAlignment - 1),
 
@@ -415,7 +414,7 @@
   _cleanup_sleep_factor(0.0),
   _cleanup_task_overhead(1.0),
   _cleanup_list("Cleanup List"),
-  _region_bm(max_regions, false /* in_resource_area*/),
+  _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
   _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
            CardTableModRefBS::card_shift,
            false /* in_resource_area*/),
@@ -497,7 +496,7 @@
     _task_queues->register_queue(i, task_queue);
 
     _count_card_bitmaps[i] = BitMap(card_bm_size, false);
-    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions);
+    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions);
 
     _tasks[i] = new CMTask(i, this,
                            _count_marked_bytes[i],
@@ -1228,18 +1227,17 @@
   void set_bit_for_region(HeapRegion* hr) {
     assert(!hr->continuesHumongous(), "should have filtered those out");
 
-    size_t index = hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
     if (!hr->startsHumongous()) {
       // Normal (non-humongous) case: just set the bit.
-      _region_bm->par_at_put((BitMap::idx_t) index, true);
+      _region_bm->par_at_put(index, true);
     } else {
       // Starts humongous case: calculate how many regions are part of
       // this humongous region and then set the bit range.
       G1CollectedHeap* g1h = G1CollectedHeap::heap();
       HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
-      size_t end_index = last_hr->hrs_index() + 1;
-      _region_bm->par_at_put_range((BitMap::idx_t) index,
-                                   (BitMap::idx_t) end_index, true);
+      BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
+      _region_bm->par_at_put_range(index, end_index, true);
     }
   }
 
@@ -1418,7 +1416,7 @@
     // Verify that _top_at_conc_count == ntams
     if (hr->top_at_conc_mark_count() != hr->next_top_at_mark_start()) {
       if (_verbose) {
-        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": top at conc count incorrect: "
+        gclog_or_tty->print_cr("Region %u: top at conc count incorrect: "
                                "expected " PTR_FORMAT ", actual: " PTR_FORMAT,
                                hr->hrs_index(), hr->next_top_at_mark_start(),
                                hr->top_at_conc_mark_count());
@@ -1434,7 +1432,7 @@
     // we have missed accounting some objects during the actual marking.
     if (exp_marked_bytes > act_marked_bytes) {
       if (_verbose) {
-        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": marked bytes mismatch: "
+        gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
                                "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
                                hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
       }
@@ -1445,15 +1443,16 @@
     // (which was just calculated) region bit maps.
     // We're not OK if the bit in the calculated expected region
     // bitmap is set and the bit in the actual region bitmap is not.
-    BitMap::idx_t index = (BitMap::idx_t)hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
 
     bool expected = _exp_region_bm->at(index);
     bool actual = _region_bm->at(index);
     if (expected && !actual) {
       if (_verbose) {
-        gclog_or_tty->print_cr("Region " SIZE_FORMAT ": region bitmap mismatch: "
-                               "expected: %d, actual: %d",
-                               hr->hrs_index(), expected, actual);
+        gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
+                               "expected: %s, actual: %s",
+                               hr->hrs_index(),
+                               BOOL_TO_STR(expected), BOOL_TO_STR(actual));
       }
       failures += 1;
     }
@@ -1471,9 +1470,10 @@
 
       if (expected && !actual) {
         if (_verbose) {
-          gclog_or_tty->print_cr("Region " SIZE_FORMAT ": card bitmap mismatch at " SIZE_FORMAT ": "
-                                 "expected: %d, actual: %d",
-                                 hr->hrs_index(), i, expected, actual);
+          gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
+                                 "expected: %s, actual: %s",
+                                 hr->hrs_index(), i,
+                                 BOOL_TO_STR(expected), BOOL_TO_STR(actual));
         }
         failures += 1;
       }
@@ -1603,18 +1603,17 @@
   void set_bit_for_region(HeapRegion* hr) {
     assert(!hr->continuesHumongous(), "should have filtered those out");
 
-    size_t index = hr->hrs_index();
+    BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
     if (!hr->startsHumongous()) {
       // Normal (non-humongous) case: just set the bit.
-      _region_bm->par_set_bit((BitMap::idx_t) index);
+      _region_bm->par_set_bit(index);
     } else {
       // Starts humongous case: calculate how many regions are part of
       // this humongous region and then set the bit range.
       G1CollectedHeap* g1h = G1CollectedHeap::heap();
       HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
-      size_t end_index = last_hr->hrs_index() + 1;
-      _region_bm->par_at_put_range((BitMap::idx_t) index,
-                                   (BitMap::idx_t) end_index, true);
+      BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
+      _region_bm->par_at_put_range(index, end_index, true);
     }
   }
 
@@ -1718,8 +1717,8 @@
       _n_workers = 1;
     }
 
-    _live_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
-    _used_bytes = NEW_C_HEAP_ARRAY(size_t, _n_workers);
+    _live_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
+    _used_bytes = NEW_C_HEAP_ARRAY(size_t, (size_t) _n_workers);
   }
 
   ~G1ParFinalCountTask() {
@@ -1768,7 +1767,7 @@
   G1CollectedHeap* _g1;
   int _worker_num;
   size_t _max_live_bytes;
-  size_t _regions_claimed;
+  uint _regions_claimed;
   size_t _freed_bytes;
   FreeRegionList* _local_cleanup_list;
   OldRegionSet* _old_proxy_set;
@@ -1821,7 +1820,7 @@
   }
 
   size_t max_live_bytes() { return _max_live_bytes; }
-  size_t regions_claimed() { return _regions_claimed; }
+  uint regions_claimed() { return _regions_claimed; }
   double claimed_region_time_sec() { return _claimed_region_time; }
   double max_region_time_sec() { return _max_region_time; }
 };
@@ -2146,7 +2145,7 @@
 
   if (G1ConcRegionFreeingVerbose) {
     gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
-                           "cleanup list has "SIZE_FORMAT" entries",
+                           "cleanup list has %u entries",
                            _cleanup_list.length());
   }
 
@@ -2168,9 +2167,8 @@
         _cleanup_list.is_empty()) {
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [complete cleanup] : "
-                               "appending "SIZE_FORMAT" entries to the "
-                               "secondary_free_list, clean list still has "
-                               SIZE_FORMAT" entries",
+                               "appending %u entries to the secondary_free_list, "
+                               "cleanup list still has %u entries",
                                tmp_free_list.length(),
                                _cleanup_list.length());
       }
@@ -3140,7 +3138,7 @@
     assert(limit_idx <= end_idx, "or else use atomics");
 
     // Aggregate the "stripe" in the count data associated with hr.
-    size_t hrs_index = hr->hrs_index();
+    uint hrs_index = hr->hrs_index();
     size_t marked_bytes = 0;
 
     for (int i = 0; (size_t)i < _max_task_num; i += 1) {
@@ -3248,7 +3246,7 @@
   // of the final counting task.
   _region_bm.clear();
 
-  size_t max_regions = _g1h->max_regions();
+  uint max_regions = _g1h->max_regions();
   assert(_max_task_num != 0, "unitialized");
 
   for (int i = 0; (size_t) i < _max_task_num; i += 1) {
@@ -3258,7 +3256,7 @@
     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
     assert(marked_bytes_array != NULL, "uninitialized");
 
-    memset(marked_bytes_array, 0, (max_regions * sizeof(size_t)));
+    memset(marked_bytes_array, 0, (size_t) max_regions * sizeof(size_t));
     task_card_bm->clear();
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -636,7 +636,7 @@
     return _task_queues->steal(task_num, hash_seed, obj);
   }
 
-  ConcurrentMark(ReservedSpace rs, int max_regions);
+  ConcurrentMark(ReservedSpace rs, uint max_regions);
   ~ConcurrentMark();
 
   ConcurrentMarkThread* cmThread() { return _cmThread; }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -49,7 +49,7 @@
   HeapWord* start = mr.start();
   HeapWord* last = mr.last();
   size_t region_size_bytes = mr.byte_size();
-  size_t index = hr->hrs_index();
+  uint index = hr->hrs_index();
 
   assert(!hr->continuesHumongous(), "should not be HC region");
   assert(hr == g1h->heap_region_containing(start), "sanity");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -140,7 +140,7 @@
 }
 
 void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
-  msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
+  msg->append("[%s] %s c: %u b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
               _name, message, _count, BOOL_TO_STR(_bot_updates),
               _alloc_region, _used_bytes_before);
 }
@@ -215,7 +215,7 @@
       jio_snprintf(rest_buffer, buffer_length, "");
     }
 
-    tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s",
+    tty->print_cr("[%s] %u %s : %s %s",
                   _name, _count, hr_buffer, str, rest_buffer);
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
   // the region that is re-used using the set() method. This count can
   // be used in any heuristics that might want to bound how many
   // distinct regions this object can used during an active interval.
-  size_t _count;
+  uint _count;
 
   // When we set up a new active region we save its used bytes in this
   // field so that, when we retire it, we can calculate how much space
@@ -136,7 +136,7 @@
     return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
   }
 
-  size_t count() { return _count; }
+  uint count() { return _count; }
 
   // The following two are the building blocks for the allocation method.
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -234,7 +234,7 @@
 bool YoungList::check_list_well_formed() {
   bool ret = true;
 
-  size_t length = 0;
+  uint length = 0;
   HeapRegion* curr = _head;
   HeapRegion* last = NULL;
   while (curr != NULL) {
@@ -253,7 +253,7 @@
 
   if (!ret) {
     gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
-    gclog_or_tty->print_cr("###   list has %d entries, _length is %d",
+    gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
                            length, _length);
   }
 
@@ -264,7 +264,7 @@
   bool ret = true;
 
   if (_length != 0) {
-    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %d",
+    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
                   _length);
     ret = false;
   }
@@ -337,8 +337,7 @@
     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
     young_index_in_cset += 1;
   }
-  assert((size_t) young_index_in_cset == _survivor_length,
-         "post-condition");
+  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 
   _head   = _survivor_head;
@@ -533,7 +532,7 @@
     if (!_secondary_free_list.is_empty()) {
       if (G1ConcRegionFreeingVerbose) {
         gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
-                               "secondary_free_list has "SIZE_FORMAT" entries",
+                               "secondary_free_list has %u entries",
                                _secondary_free_list.length());
       }
       // It looks as if there are free regions available on the
@@ -619,12 +618,12 @@
   return res;
 }
 
-size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
-                                                          size_t word_size) {
+uint G1CollectedHeap::humongous_obj_allocate_find_first(uint num_regions,
+                                                        size_t word_size) {
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
-  size_t first = G1_NULL_HRS_INDEX;
+  uint first = G1_NULL_HRS_INDEX;
   if (num_regions == 1) {
     // Only one region to allocate, no need to go through the slower
     // path. The caller will attempt the expasion if this fails, so
@@ -650,7 +649,7 @@
     if (free_regions() >= num_regions) {
       first = _hrs.find_contiguous(num_regions);
       if (first != G1_NULL_HRS_INDEX) {
-        for (size_t i = first; i < first + num_regions; ++i) {
+        for (uint i = first; i < first + num_regions; ++i) {
           HeapRegion* hr = region_at(i);
           assert(hr->is_empty(), "sanity");
           assert(is_on_master_free_list(hr), "sanity");
@@ -664,15 +663,15 @@
 }
 
 HeapWord*
-G1CollectedHeap::humongous_obj_allocate_initialize_regions(size_t first,
-                                                           size_t num_regions,
+G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
+                                                           uint num_regions,
                                                            size_t word_size) {
   assert(first != G1_NULL_HRS_INDEX, "pre-condition");
   assert(isHumongous(word_size), "word_size should be humongous");
   assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");
 
   // Index of last region in the series + 1.
-  size_t last = first + num_regions;
+  uint last = first + num_regions;
 
   // We need to initialize the region(s) we just discovered. This is
   // a bit tricky given that it can happen concurrently with
@@ -683,7 +682,7 @@
   // a specific order.
 
   // The word size sum of all the regions we will allocate.
-  size_t word_size_sum = num_regions * HeapRegion::GrainWords;
+  size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
   assert(word_size <= word_size_sum, "sanity");
 
   // This will be the "starts humongous" region.
@@ -722,7 +721,7 @@
   // Then, if there are any, we will set up the "continues
   // humongous" regions.
   HeapRegion* hr = NULL;
-  for (size_t i = first + 1; i < last; ++i) {
+  for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
     hr->set_continuesHumongous(first_hr);
   }
@@ -768,7 +767,7 @@
   // last one) is actually used when we will free up the humongous
   // region in free_humongous_region().
   hr = NULL;
-  for (size_t i = first + 1; i < last; ++i) {
+  for (uint i = first + 1; i < last; ++i) {
     hr = region_at(i);
     if ((i + 1) == last) {
       // last continues humongous region
@@ -804,14 +803,14 @@
 
   verify_region_sets_optional();
 
-  size_t num_regions =
-         round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
-  size_t x_size = expansion_regions();
-  size_t fs = _hrs.free_suffix();
-  size_t first = humongous_obj_allocate_find_first(num_regions, word_size);
+  size_t word_size_rounded = round_to(word_size, HeapRegion::GrainWords);
+  uint num_regions = (uint) (word_size_rounded / HeapRegion::GrainWords);
+  uint x_num = expansion_regions();
+  uint fs = _hrs.free_suffix();
+  uint first = humongous_obj_allocate_find_first(num_regions, word_size);
   if (first == G1_NULL_HRS_INDEX) {
     // The only thing we can do now is attempt expansion.
-    if (fs + x_size >= num_regions) {
+    if (fs + x_num >= num_regions) {
       // If the number of regions we're trying to allocate for this
       // object is at most the number of regions in the free suffix,
       // then the call to humongous_obj_allocate_find_first() above
@@ -1781,7 +1780,7 @@
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
                                          HeapRegion::GrainBytes);
-  size_t num_regions_deleted = 0;
+  uint num_regions_deleted = 0;
   MemRegion mr = _hrs.shrink_by(aligned_shrink_bytes, &num_regions_deleted);
   HeapWord* old_end = (HeapWord*) _g1_storage.high();
   assert(mr.end() == old_end, "post-condition");
@@ -2004,7 +2003,7 @@
   _reserved.set_start((HeapWord*)heap_rs.base());
   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _expansion_regions = max_byte_size/HeapRegion::GrainBytes;
+  _expansion_regions = (uint) (max_byte_size / HeapRegion::GrainBytes);
 
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
@@ -2041,7 +2040,7 @@
 
   // 6843694 - ensure that the maximum region index can fit
   // in the remembered set structures.
-  const size_t max_region_idx = ((size_t)1 << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
+  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
   guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
 
   size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
@@ -2057,13 +2056,14 @@
   _g1h = this;
 
    _in_cset_fast_test_length = max_regions();
-   _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+   _in_cset_fast_test_base =
+                   NEW_C_HEAP_ARRAY(bool, (size_t) _in_cset_fast_test_length);
 
    // We're biasing _in_cset_fast_test to avoid subtracting the
    // beginning of the heap every time we want to index; basically
    // it's the same with what we do with the card table.
    _in_cset_fast_test = _in_cset_fast_test_base -
-                ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+               ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
 
    // Clear the _cset_fast_test bitmap in anticipation of adding
    // regions to the incremental collection set for the first
@@ -2072,7 +2072,7 @@
 
   // Create the ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
-  _cm       = new ConcurrentMark(heap_rs, (int) max_regions());
+  _cm       = new ConcurrentMark(heap_rs, max_regions());
   _cmThread = _cm->cmThread();
 
   // Initialize the from_card cache structure of HeapRegionRemSet.
@@ -2581,7 +2581,7 @@
                                                  uint worker,
                                                  uint no_of_par_workers,
                                                  jint claim_value) {
-  const size_t regions = n_regions();
+  const uint regions = n_regions();
   const uint max_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
                              no_of_par_workers :
                              1);
@@ -2589,11 +2589,11 @@
          no_of_par_workers == workers()->total_workers(),
          "Non dynamic should use fixed number of workers");
   // try to spread out the starting points of the workers
-  const size_t start_index = regions / max_workers * (size_t) worker;
+  const uint start_index = regions / max_workers * worker;
 
   // each worker will actually look at all regions
-  for (size_t count = 0; count < regions; ++count) {
-    const size_t index = (start_index + count) % regions;
+  for (uint count = 0; count < regions; ++count) {
+    const uint index = (start_index + count) % regions;
     assert(0 <= index && index < regions, "sanity");
     HeapRegion* r = region_at(index);
     // we'll ignore "continues humongous" regions (we'll process them
@@ -2615,7 +2615,7 @@
         // result, we might end up processing them twice. So, we'll do
         // them first (notice: most closures will ignore them anyway) and
         // then we'll do the "starts humongous" region.
-        for (size_t ch_index = index + 1; ch_index < regions; ++ch_index) {
+        for (uint ch_index = index + 1; ch_index < regions; ++ch_index) {
           HeapRegion* chr = region_at(ch_index);
 
           // if the region has already been claimed or it's not
@@ -2683,8 +2683,9 @@
 class CheckClaimValuesClosure : public HeapRegionClosure {
 private:
   jint _claim_value;
-  size_t _failures;
+  uint _failures;
   HeapRegion* _sh_region;
+
 public:
   CheckClaimValuesClosure(jint claim_value) :
     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
@@ -2712,9 +2713,7 @@
     }
     return false;
   }
-  size_t failures() {
-    return _failures;
-  }
+  uint failures() { return _failures; }
 };
 
 bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
@@ -2724,17 +2723,15 @@
 }
 
 class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
-  jint   _claim_value;
-  size_t _failures;
+private:
+  jint _claim_value;
+  uint _failures;
 
 public:
   CheckClaimValuesInCSetHRClosure(jint claim_value) :
-    _claim_value(claim_value),
-    _failures(0) { }
-
-  size_t failures() {
-    return _failures;
-  }
+    _claim_value(claim_value), _failures(0) { }
+
+  uint failures() { return _failures; }
 
   bool doHeapRegion(HeapRegion* hr) {
     assert(hr->in_collection_set(), "how?");
@@ -2801,14 +2798,14 @@
 
   result = g1_policy()->collection_set();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    size_t cs_size = g1_policy()->cset_region_length();
+    uint cs_size = g1_policy()->cset_region_length();
     uint active_workers = workers()->active_workers();
     assert(UseDynamicNumberOfGCThreads ||
              active_workers == workers()->total_workers(),
              "Unless dynamic should use total workers");
 
-    size_t end_ind   = (cs_size * worker_i) / active_workers;
-    size_t start_ind = 0;
+    uint end_ind   = (cs_size * worker_i) / active_workers;
+    uint start_ind = 0;
 
     if (worker_i > 0 &&
         _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
@@ -2818,7 +2815,7 @@
       result = _worker_cset_start_region[worker_i - 1];
     }
 
-    for (size_t i = start_ind; i < end_ind; i++) {
+    for (uint i = start_ind; i < end_ind; i++) {
       result = result->next_in_collection_set();
     }
   }
@@ -3280,12 +3277,12 @@
             _g1_storage.high_boundary());
   st->cr();
   st->print("  region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
-  size_t young_regions = _young_list->length();
-  st->print(SIZE_FORMAT " young (" SIZE_FORMAT "K), ",
-            young_regions, young_regions * HeapRegion::GrainBytes / K);
-  size_t survivor_regions = g1_policy()->recorded_survivor_regions();
-  st->print(SIZE_FORMAT " survivors (" SIZE_FORMAT "K)",
-            survivor_regions, survivor_regions * HeapRegion::GrainBytes / K);
+  uint young_regions = _young_list->length();
+  st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
+            (size_t) young_regions * HeapRegion::GrainBytes / K);
+  uint survivor_regions = g1_policy()->recorded_survivor_regions();
+  st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
+            (size_t) survivor_regions * HeapRegion::GrainBytes / K);
   st->cr();
   perm()->as_gen()->print_on(st);
 }
@@ -3295,7 +3292,11 @@
 
   // Print the per-region information.
   st->cr();
-  st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), HS=humongous(starts), HC=humongous(continues), CS=collection set, F=free, TS=gc time stamp, PTAMS=previous top-at-mark-start, NTAMS=next top-at-mark-start)");
+  st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
+               "HS=humongous(starts), HC=humongous(continues), "
+               "CS=collection set, F=free, TS=gc time stamp, "
+               "PTAMS=previous top-at-mark-start, "
+               "NTAMS=next top-at-mark-start)");
   PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
 }
@@ -3473,16 +3474,16 @@
 
 void
 G1CollectedHeap::setup_surviving_young_words() {
-  guarantee( _surviving_young_words == NULL, "pre-condition" );
-  size_t array_length = g1_policy()->young_cset_region_length();
-  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
+  assert(_surviving_young_words == NULL, "pre-condition");
+  uint array_length = g1_policy()->young_cset_region_length();
+  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length);
   if (_surviving_young_words == NULL) {
     vm_exit_out_of_memory(sizeof(size_t) * array_length,
                           "Not enough space for young surv words summary.");
   }
-  memset(_surviving_young_words, 0, array_length * sizeof(size_t));
+  memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
 #ifdef ASSERT
-  for (size_t i = 0;  i < array_length; ++i) {
+  for (uint i = 0;  i < array_length; ++i) {
     assert( _surviving_young_words[i] == 0, "memset above" );
   }
 #endif // !ASSERT
@@ -3491,9 +3492,10 @@
 void
 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-  size_t array_length = g1_policy()->young_cset_region_length();
-  for (size_t i = 0; i < array_length; ++i)
+  uint array_length = g1_policy()->young_cset_region_length();
+  for (uint i = 0; i < array_length; ++i) {
     _surviving_young_words[i] += surv_young_words[i];
+  }
 }
 
 void
@@ -4242,16 +4244,16 @@
   // non-young regions (where the age is -1)
   // We also add a few elements at the beginning and at the end in
   // an attempt to eliminate cache contention
-  size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
-  size_t array_length = PADDING_ELEM_NUM +
-                        real_length +
-                        PADDING_ELEM_NUM;
+  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
+  uint array_length = PADDING_ELEM_NUM +
+                      real_length +
+                      PADDING_ELEM_NUM;
   _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length);
   if (_surviving_young_words_base == NULL)
     vm_exit_out_of_memory(array_length * sizeof(size_t),
                           "Not enough space for young surv histo.");
   _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
-  memset(_surviving_young_words, 0, real_length * sizeof(size_t));
+  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
 
   _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
   _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
@@ -4388,7 +4390,7 @@
 template <bool do_gen_barrier, G1Barrier barrier, bool do_mark_object>
 oop G1ParCopyClosure<do_gen_barrier, barrier, do_mark_object>
   ::copy_to_survivor_space(oop old) {
-  size_t    word_sz = old->size();
+  size_t word_sz = old->size();
   HeapRegion* from_region = _g1->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
   int       young_index = from_region->young_index_in_cset()+1;
@@ -5585,8 +5587,8 @@
   hr->set_notHumongous();
   free_region(hr, &hr_pre_used, free_list, par);
 
-  size_t i = hr->hrs_index() + 1;
-  size_t num = 1;
+  uint i = hr->hrs_index() + 1;
+  uint num = 1;
   while (i < n_regions()) {
     HeapRegion* curr_hr = region_at(i);
     if (!curr_hr->continuesHumongous()) {
@@ -5795,7 +5797,7 @@
     if (cur->is_young()) {
       int index = cur->young_index_in_cset();
       assert(index != -1, "invariant");
-      assert((size_t) index < policy->young_cset_region_length(), "invariant");
+      assert((uint) index < policy->young_cset_region_length(), "invariant");
       size_t words_survived = _surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
 
@@ -6135,7 +6137,7 @@
 // Methods for the GC alloc regions
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
-                                                 size_t count,
+                                                 uint count,
                                                  GCAllocPurpose ap) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");
 
@@ -6207,7 +6209,7 @@
   FreeRegionList*     _free_list;
   OldRegionSet*       _old_set;
   HumongousRegionSet* _humongous_set;
-  size_t              _region_count;
+  uint                _region_count;
 
 public:
   VerifyRegionListsClosure(OldRegionSet* old_set,
@@ -6216,7 +6218,7 @@
     _old_set(old_set), _humongous_set(humongous_set),
     _free_list(free_list), _region_count(0) { }
 
-  size_t region_count()      { return _region_count;      }
+  uint region_count() { return _region_count; }
 
   bool doHeapRegion(HeapRegion* hr) {
     _region_count += 1;
@@ -6238,7 +6240,7 @@
   }
 };
 
-HeapRegion* G1CollectedHeap::new_heap_region(size_t hrs_index,
+HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                              HeapWord* bottom) {
   HeapWord* end = bottom + HeapRegion::GrainWords;
   MemRegion mr(bottom, end);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -85,8 +85,8 @@
 
   HeapRegion* _curr;
 
-  size_t      _length;
-  size_t      _survivor_length;
+  uint        _length;
+  uint        _survivor_length;
 
   size_t      _last_sampled_rs_lengths;
   size_t      _sampled_rs_lengths;
@@ -101,8 +101,8 @@
 
   void         empty_list();
   bool         is_empty() { return _length == 0; }
-  size_t       length() { return _length; }
-  size_t       survivor_length() { return _survivor_length; }
+  uint         length() { return _length; }
+  uint         survivor_length() { return _survivor_length; }
 
   // Currently we do not keep track of the used byte sum for the
   // young list and the survivors and it'd be quite a lot of work to
@@ -111,10 +111,10 @@
   // we'll report the more accurate information then.
   size_t       eden_used_bytes() {
     assert(length() >= survivor_length(), "invariant");
-    return (length() - survivor_length()) * HeapRegion::GrainBytes;
+    return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
   }
   size_t       survivor_used_bytes() {
-    return survivor_length() * HeapRegion::GrainBytes;
+    return (size_t) survivor_length() * HeapRegion::GrainBytes;
   }
 
   void rs_length_sampling_init();
@@ -247,7 +247,7 @@
   MasterHumongousRegionSet  _humongous_set;
 
   // The number of regions we could create by expansion.
-  size_t _expansion_regions;
+  uint _expansion_regions;
 
   // The block offset table for the G1 heap.
   G1BlockOffsetSharedArray* _bot_shared;
@@ -339,7 +339,7 @@
   bool* _in_cset_fast_test_base;
 
   // The length of the _in_cset_fast_test_base array.
-  size_t _in_cset_fast_test_length;
+  uint _in_cset_fast_test_length;
 
   volatile unsigned _gc_time_stamp;
 
@@ -458,14 +458,14 @@
   // length and remove them from the master free list. Return the
   // index of the first region or G1_NULL_HRS_INDEX if the search
   // was unsuccessful.
-  size_t humongous_obj_allocate_find_first(size_t num_regions,
-                                           size_t word_size);
+  uint humongous_obj_allocate_find_first(uint num_regions,
+                                         size_t word_size);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
   // humongous region.
-  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
-                                                      size_t num_regions,
+  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
+                                                      uint num_regions,
                                                       size_t word_size);
 
   // Attempt to allocate a humongous object of the given size. Return
@@ -574,7 +574,7 @@
                                    size_t allocated_bytes);
 
   // For GC alloc regions.
-  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
                                   GCAllocPurpose ap);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, GCAllocPurpose ap);
@@ -641,7 +641,7 @@
   void register_region_with_in_cset_fast_test(HeapRegion* r) {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     assert(r->in_collection_set(), "invariant");
-    size_t index = r->hrs_index();
+    uint index = r->hrs_index();
     assert(index < _in_cset_fast_test_length, "invariant");
     assert(!_in_cset_fast_test_base[index], "invariant");
     _in_cset_fast_test_base[index] = true;
@@ -655,7 +655,7 @@
     if (_g1_committed.contains((HeapWord*) obj)) {
       // no need to subtract the bottom of the heap from obj,
       // _in_cset_fast_test is biased
-      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
+      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
       bool ret = _in_cset_fast_test[index];
       // let's make sure the result is consistent with what the slower
       // test returns
@@ -670,7 +670,7 @@
   void clear_cset_fast_test() {
     assert(_in_cset_fast_test_base != NULL, "sanity");
     memset(_in_cset_fast_test_base, false,
-        _in_cset_fast_test_length * sizeof(bool));
+           (size_t) _in_cset_fast_test_length * sizeof(bool));
   }
 
   // This is called at the end of either a concurrent cycle or a Full
@@ -1101,23 +1101,23 @@
   }
 
   // The total number of regions in the heap.
-  size_t n_regions() { return _hrs.length(); }
+  uint n_regions() { return _hrs.length(); }
 
   // The max number of regions in the heap.
-  size_t max_regions() { return _hrs.max_length(); }
+  uint max_regions() { return _hrs.max_length(); }
 
   // The number of regions that are completely free.
-  size_t free_regions() { return _free_list.length(); }
+  uint free_regions() { return _free_list.length(); }
 
   // The number of regions that are not completely free.
-  size_t used_regions() { return n_regions() - free_regions(); }
+  uint used_regions() { return n_regions() - free_regions(); }
 
   // The number of regions available for "regular" expansion.
-  size_t expansion_regions() { return _expansion_regions; }
+  uint expansion_regions() { return _expansion_regions; }
 
   // Factory method for HeapRegion instances. It will return NULL if
   // the allocation fails.
-  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
 
   void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
   void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
@@ -1301,7 +1301,7 @@
   void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
 
   // Return the region with the given index. It assumes the index is valid.
-  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
+  HeapRegion* region_at(uint index) const { return _hrs.at(index); }
 
   // Divide the heap region sequence into "chunks" of some size (the number
   // of regions divided by the number of parallel threads times some
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -431,31 +431,36 @@
   }
 
   if (FLAG_IS_CMDLINE(NewSize)) {
-     _min_desired_young_length = MAX2((size_t) 1, NewSize / HeapRegion::GrainBytes);
+    _min_desired_young_length = MAX2((uint) (NewSize / HeapRegion::GrainBytes),
+                                     1U);
     if (FLAG_IS_CMDLINE(MaxNewSize)) {
-      _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
+      _max_desired_young_length =
+                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
+                                  1U);
       _sizer_kind = SizerMaxAndNewSize;
       _adaptive_size = _min_desired_young_length == _max_desired_young_length;
     } else {
       _sizer_kind = SizerNewSizeOnly;
     }
   } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
-    _max_desired_young_length = MAX2((size_t) 1, MaxNewSize / HeapRegion::GrainBytes);
+    _max_desired_young_length =
+                             MAX2((uint) (MaxNewSize / HeapRegion::GrainBytes),
+                                  1U);
     _sizer_kind = SizerMaxNewSizeOnly;
   }
 }
 
-size_t G1YoungGenSizer::calculate_default_min_length(size_t new_number_of_heap_regions) {
-  size_t default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
-  return MAX2((size_t)1, default_value);
+uint G1YoungGenSizer::calculate_default_min_length(uint new_number_of_heap_regions) {
+  uint default_value = (new_number_of_heap_regions * G1DefaultMinNewGenPercent) / 100;
+  return MAX2(1U, default_value);
 }
 
-size_t G1YoungGenSizer::calculate_default_max_length(size_t new_number_of_heap_regions) {
-  size_t default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
-  return MAX2((size_t)1, default_value);
+uint G1YoungGenSizer::calculate_default_max_length(uint new_number_of_heap_regions) {
+  uint default_value = (new_number_of_heap_regions * G1DefaultMaxNewGenPercent) / 100;
+  return MAX2(1U, default_value);
 }
 
-void G1YoungGenSizer::heap_size_changed(size_t new_number_of_heap_regions) {
+void G1YoungGenSizer::heap_size_changed(uint new_number_of_heap_regions) {
   assert(new_number_of_heap_regions > 0, "Heap must be initialized");
 
   switch (_sizer_kind) {
@@ -512,16 +517,16 @@
   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 }
 
-bool G1CollectorPolicy::predict_will_fit(size_t young_length,
+bool G1CollectorPolicy::predict_will_fit(uint young_length,
                                          double base_time_ms,
-                                         size_t base_free_regions,
+                                         uint base_free_regions,
                                          double target_pause_time_ms) {
   if (young_length >= base_free_regions) {
     // end condition 1: not enough space for the young regions
     return false;
   }
 
-  double accum_surv_rate = accum_yg_surv_rate_pred((int)(young_length - 1));
+  double accum_surv_rate = accum_yg_surv_rate_pred((int) young_length - 1);
   size_t bytes_to_copy =
                (size_t) (accum_surv_rate * (double) HeapRegion::GrainBytes);
   double copy_time_ms = predict_object_copy_time_ms(bytes_to_copy);
@@ -533,7 +538,7 @@
   }
 
   size_t free_bytes =
-                  (base_free_regions - young_length) * HeapRegion::GrainBytes;
+                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
   if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
     // end condition 3: out-of-space (conservatively!)
     return false;
@@ -543,25 +548,25 @@
   return true;
 }
 
-void G1CollectorPolicy::record_new_heap_size(size_t new_number_of_regions) {
+void G1CollectorPolicy::record_new_heap_size(uint new_number_of_regions) {
   // re-calculate the necessary reserve
   double reserve_regions_d = (double) new_number_of_regions * _reserve_factor;
   // We use ceiling so that if reserve_regions_d is > 0.0 (but
   // smaller than 1.0) we'll get 1.
-  _reserve_regions = (size_t) ceil(reserve_regions_d);
+  _reserve_regions = (uint) ceil(reserve_regions_d);
 
   _young_gen_sizer->heap_size_changed(new_number_of_regions);
 }
 
-size_t G1CollectorPolicy::calculate_young_list_desired_min_length(
-                                                     size_t base_min_length) {
-  size_t desired_min_length = 0;
+uint G1CollectorPolicy::calculate_young_list_desired_min_length(
+                                                       uint base_min_length) {
+  uint desired_min_length = 0;
   if (adaptive_young_list_length()) {
     if (_alloc_rate_ms_seq->num() > 3) {
       double now_sec = os::elapsedTime();
       double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
       double alloc_rate_ms = predict_alloc_rate_ms();
-      desired_min_length = (size_t) ceil(alloc_rate_ms * when_ms);
+      desired_min_length = (uint) ceil(alloc_rate_ms * when_ms);
     } else {
       // otherwise we don't have enough info to make the prediction
     }
@@ -571,7 +576,7 @@
   return MAX2(_young_gen_sizer->min_desired_young_length(), desired_min_length);
 }
 
-size_t G1CollectorPolicy::calculate_young_list_desired_max_length() {
+uint G1CollectorPolicy::calculate_young_list_desired_max_length() {
   // Here, we might want to also take into account any additional
   // constraints (i.e., user-defined minimum bound). Currently, we
   // effectively don't set this bound.
@@ -588,11 +593,11 @@
   // Calculate the absolute and desired min bounds.
 
   // This is how many young regions we already have (currently: the survivors).
-  size_t base_min_length = recorded_survivor_regions();
+  uint base_min_length = recorded_survivor_regions();
   // This is the absolute minimum young length, which ensures that we
   // can allocate one eden region in the worst-case.
-  size_t absolute_min_length = base_min_length + 1;
-  size_t desired_min_length =
+  uint absolute_min_length = base_min_length + 1;
+  uint desired_min_length =
                      calculate_young_list_desired_min_length(base_min_length);
   if (desired_min_length < absolute_min_length) {
     desired_min_length = absolute_min_length;
@@ -601,16 +606,16 @@
   // Calculate the absolute and desired max bounds.
 
   // We will try our best not to "eat" into the reserve.
-  size_t absolute_max_length = 0;
+  uint absolute_max_length = 0;
   if (_free_regions_at_end_of_collection > _reserve_regions) {
     absolute_max_length = _free_regions_at_end_of_collection - _reserve_regions;
   }
-  size_t desired_max_length = calculate_young_list_desired_max_length();
+  uint desired_max_length = calculate_young_list_desired_max_length();
   if (desired_max_length > absolute_max_length) {
     desired_max_length = absolute_max_length;
   }
 
-  size_t young_list_target_length = 0;
+  uint young_list_target_length = 0;
   if (adaptive_young_list_length()) {
     if (gcs_are_young()) {
       young_list_target_length =
@@ -648,11 +653,11 @@
   update_max_gc_locker_expansion();
 }
 
-size_t
+uint
 G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths,
-                                                   size_t base_min_length,
-                                                   size_t desired_min_length,
-                                                   size_t desired_max_length) {
+                                                     uint base_min_length,
+                                                     uint desired_min_length,
+                                                     uint desired_max_length) {
   assert(adaptive_young_list_length(), "pre-condition");
   assert(gcs_are_young(), "only call this for young GCs");
 
@@ -667,9 +672,9 @@
   // will be reflected in the predictions by the
   // survivor_regions_evac_time prediction.
   assert(desired_min_length > base_min_length, "invariant");
-  size_t min_young_length = desired_min_length - base_min_length;
+  uint min_young_length = desired_min_length - base_min_length;
   assert(desired_max_length > base_min_length, "invariant");
-  size_t max_young_length = desired_max_length - base_min_length;
+  uint max_young_length = desired_max_length - base_min_length;
 
   double target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
   double survivor_regions_evac_time = predict_survivor_regions_evac_time();
@@ -679,8 +684,8 @@
   double base_time_ms =
     predict_base_elapsed_time_ms(pending_cards, scanned_cards) +
     survivor_regions_evac_time;
-  size_t available_free_regions = _free_regions_at_end_of_collection;
-  size_t base_free_regions = 0;
+  uint available_free_regions = _free_regions_at_end_of_collection;
+  uint base_free_regions = 0;
   if (available_free_regions > _reserve_regions) {
     base_free_regions = available_free_regions - _reserve_regions;
   }
@@ -717,9 +722,9 @@
       // the new max. This way we maintain the loop invariants.
 
       assert(min_young_length < max_young_length, "invariant");
-      size_t diff = (max_young_length - min_young_length) / 2;
+      uint diff = (max_young_length - min_young_length) / 2;
       while (diff > 0) {
-        size_t young_length = min_young_length + diff;
+        uint young_length = min_young_length + diff;
         if (predict_will_fit(young_length, base_time_ms,
                              base_free_regions, target_pause_time_ms)) {
           min_young_length = young_length;
@@ -1322,7 +1327,7 @@
     // given that humongous object allocations do not really affect
     // either the pause's duration nor when the next pause will take
     // place we can safely ignore them here.
-    size_t regions_allocated = eden_cset_region_length();
+    uint regions_allocated = eden_cset_region_length();
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
     _alloc_rate_ms_seq->add(alloc_rate_ms);
 
@@ -1506,8 +1511,9 @@
     double pause_time_ms = elapsed_ms;
 
     size_t diff = 0;
-    if (_max_pending_cards >= _pending_cards)
+    if (_max_pending_cards >= _pending_cards) {
       diff = _max_pending_cards - _pending_cards;
+    }
     _pending_card_diff_seq->add((double) diff);
 
     double cost_per_card_ms = 0.0;
@@ -1741,8 +1747,7 @@
   return region_elapsed_time_ms;
 }
 
-size_t
-G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
+size_t G1CollectorPolicy::predict_bytes_to_copy(HeapRegion* hr) {
   size_t bytes_to_copy;
   if (hr->is_marked())
     bytes_to_copy = hr->max_live_bytes();
@@ -1756,8 +1761,8 @@
 }
 
 void
-G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
-                                          size_t survivor_cset_region_length) {
+G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
+                                            uint survivor_cset_region_length) {
   _eden_cset_region_length     = eden_cset_region_length;
   _survivor_cset_region_length = survivor_cset_region_length;
   _old_cset_region_length      = 0;
@@ -2021,7 +2026,7 @@
 }
 #endif // PRODUCT
 
-size_t G1CollectorPolicy::max_regions(int purpose) {
+uint G1CollectorPolicy::max_regions(int purpose) {
   switch (purpose) {
     case GCAllocForSurvived:
       return _max_survivor_regions;
@@ -2034,13 +2039,13 @@
 }
 
 void G1CollectorPolicy::update_max_gc_locker_expansion() {
-  size_t expansion_region_num = 0;
+  uint expansion_region_num = 0;
   if (GCLockerEdenExpansionPercent > 0) {
     double perc = (double) GCLockerEdenExpansionPercent / 100.0;
     double expansion_region_num_d = perc * (double) _young_list_target_length;
     // We use ceiling so that if expansion_region_num_d is > 0.0 (but
     // less than 1.0) we'll get 1.
-    expansion_region_num = (size_t) ceil(expansion_region_num_d);
+    expansion_region_num = (uint) ceil(expansion_region_num_d);
   } else {
     assert(expansion_region_num == 0, "sanity");
   }
@@ -2054,7 +2059,7 @@
                  (double) _young_list_target_length / (double) SurvivorRatio;
   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
   // smaller than 1.0) we'll get 1.
-  _max_survivor_regions = (size_t) ceil(max_survivor_regions_d);
+  _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
 
   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
         HeapRegion::GrainWords * _max_survivor_regions);
@@ -2288,27 +2293,25 @@
                            (clear_marked_end_sec - start_sec) * 1000.0);
   }
 
+  uint region_num = _g1->n_regions();
   if (G1CollectedHeap::use_parallel_gc_threads()) {
-    const size_t OverpartitionFactor = 4;
-    size_t WorkUnit;
+    const uint OverpartitionFactor = 4;
+    uint WorkUnit;
     // The use of MinChunkSize = 8 in the original code
     // causes some assertion failures when the total number of
     // regions is less than 8.  The code here tries to fix that.
     // Should the original code also be fixed?
     if (no_of_gc_threads > 0) {
-      const size_t MinWorkUnit =
-        MAX2(_g1->n_regions() / no_of_gc_threads, (size_t) 1U);
-      WorkUnit =
-        MAX2(_g1->n_regions() / (no_of_gc_threads * OverpartitionFactor),
-             MinWorkUnit);
+      const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
+      WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
+                      MinWorkUnit);
     } else {
       assert(no_of_gc_threads > 0,
         "The active gc workers should be greater than 0");
       // In a product build do something reasonable to avoid a crash.
-      const size_t MinWorkUnit =
-        MAX2(_g1->n_regions() / ParallelGCThreads, (size_t) 1U);
+      const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
       WorkUnit =
-        MAX2(_g1->n_regions() / (ParallelGCThreads * OverpartitionFactor),
+        MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
              MinWorkUnit);
     }
     _collectionSetChooser->prepareForAddMarkedHeapRegionsPar(_g1->n_regions(),
@@ -2624,8 +2627,8 @@
   // pause are appended to the RHS of the young list, i.e.
   //   [Newly Young Regions ++ Survivors from last pause].
 
-  size_t survivor_region_length = young_list->survivor_length();
-  size_t eden_region_length = young_list->length() - survivor_region_length;
+  uint survivor_region_length = young_list->survivor_length();
+  uint eden_region_length = young_list->length() - survivor_region_length;
   init_cset_region_lengths(eden_region_length, survivor_region_length);
   hr = young_list->first_survivor_region();
   while (hr != NULL) {
@@ -2664,10 +2667,10 @@
   if (!gcs_are_young()) {
     CollectionSetChooser* cset_chooser = _collectionSetChooser;
     assert(cset_chooser->verify(), "CSet Chooser verification - pre");
-    const size_t min_old_cset_length = cset_chooser->calcMinOldCSetLength();
-    const size_t max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
-
-    size_t expensive_region_num = 0;
+    const uint min_old_cset_length = cset_chooser->calcMinOldCSetLength();
+    const uint max_old_cset_length = cset_chooser->calcMaxOldCSetLength();
+
+    uint expensive_region_num = 0;
     bool check_time_remaining = adaptive_young_list_length();
     HeapRegion* hr = cset_chooser->peek();
     while (hr != NULL) {
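
The bisection loop changed above in calculate_young_list_target_length() halves the gap between a young-list length that is known to fit the pause target and one that is known not to, until the two bounds meet. A minimal standalone sketch of that pattern, with a made-up will_fit() predicate standing in for predict_will_fit() and its extra arguments:

    #include <cassert>
    #include <cstdio>

    // Stand-in for predict_will_fit(): any predicate that is true up to some
    // cut-off length and false beyond it behaves the same way.
    static bool will_fit(unsigned int young_length) {
      return young_length <= 37;   // made-up cut-off for the example
    }

    // Largest value in [min_len, max_len] for which will_fit() holds, given
    // that will_fit(min_len) is true and will_fit(max_len) is false -- the
    // invariant the G1 loop maintains for min/max_young_length.
    static unsigned int largest_fitting_length(unsigned int min_len,
                                               unsigned int max_len) {
      assert(will_fit(min_len) && !will_fit(max_len));
      unsigned int diff = (max_len - min_len) / 2;
      while (diff > 0) {
        unsigned int candidate = min_len + diff;
        if (will_fit(candidate)) {
          min_len = candidate;     // candidate fits: raise the lower bound
        } else {
          max_len = candidate;     // candidate is too long: lower the upper bound
        }
        diff = (max_len - min_len) / 2;
      }
      return min_len;
    }

    int main() {
      // Prints 37: each iteration moves one bound strictly toward the other,
      // so the loop terminates with min_len equal to the largest fitting length.
      printf("largest fitting length: %u\n", largest_fitting_length(1, 100));
      return 0;
    }
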
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -128,19 +128,19 @@
     SizerNewRatio
   };
   SizerKind _sizer_kind;
-  size_t _min_desired_young_length;
-  size_t _max_desired_young_length;
+  uint _min_desired_young_length;
+  uint _max_desired_young_length;
   bool _adaptive_size;
-  size_t calculate_default_min_length(size_t new_number_of_heap_regions);
-  size_t calculate_default_max_length(size_t new_number_of_heap_regions);
+  uint calculate_default_min_length(uint new_number_of_heap_regions);
+  uint calculate_default_max_length(uint new_number_of_heap_regions);
 
 public:
   G1YoungGenSizer();
-  void heap_size_changed(size_t new_number_of_heap_regions);
-  size_t min_desired_young_length() {
+  void heap_size_changed(uint new_number_of_heap_regions);
+  uint min_desired_young_length() {
     return _min_desired_young_length;
   }
-  size_t max_desired_young_length() {
+  uint max_desired_young_length() {
     return _max_desired_young_length;
   }
   bool adaptive_young_list_length() {
@@ -175,7 +175,7 @@
 
   double _cur_collection_start_sec;
   size_t _cur_collection_pause_used_at_start_bytes;
-  size_t _cur_collection_pause_used_regions_at_start;
+  uint   _cur_collection_pause_used_regions_at_start;
   double _cur_collection_par_time_ms;
 
   double _cur_collection_code_root_fixup_time_ms;
@@ -233,13 +233,13 @@
   // indicates whether we are in young or mixed GC mode
   bool _gcs_are_young;
 
-  size_t _young_list_target_length;
-  size_t _young_list_fixed_length;
+  uint _young_list_target_length;
+  uint _young_list_fixed_length;
   size_t _prev_eden_capacity; // used for logging
 
   // The max number of regions we can extend the eden by while the GC
   // locker is active. This should be >= _young_list_target_length;
-  size_t _young_list_max_length;
+  uint _young_list_max_length;
 
   bool                  _last_gc_was_young;
 
@@ -257,7 +257,7 @@
   double                _gc_overhead_perc;
 
   double _reserve_factor;
-  size_t _reserve_regions;
+  uint _reserve_regions;
 
   bool during_marking() {
     return _during_marking;
@@ -292,18 +292,18 @@
 
   G1YoungGenSizer* _young_gen_sizer;
 
-  size_t _eden_cset_region_length;
-  size_t _survivor_cset_region_length;
-  size_t _old_cset_region_length;
+  uint _eden_cset_region_length;
+  uint _survivor_cset_region_length;
+  uint _old_cset_region_length;
 
-  void init_cset_region_lengths(size_t eden_cset_region_length,
-                                size_t survivor_cset_region_length);
+  void init_cset_region_lengths(uint eden_cset_region_length,
+                                uint survivor_cset_region_length);
 
-  size_t eden_cset_region_length()     { return _eden_cset_region_length;     }
-  size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
-  size_t old_cset_region_length()      { return _old_cset_region_length;      }
+  uint eden_cset_region_length()     { return _eden_cset_region_length;     }
+  uint survivor_cset_region_length() { return _survivor_cset_region_length; }
+  uint old_cset_region_length()      { return _old_cset_region_length;      }
 
-  size_t _free_regions_at_end_of_collection;
+  uint _free_regions_at_end_of_collection;
 
   size_t _recorded_rs_lengths;
   size_t _max_rs_lengths;
@@ -496,10 +496,10 @@
 
   void set_recorded_rs_lengths(size_t rs_lengths);
 
-  size_t cset_region_length()       { return young_cset_region_length() +
-                                             old_cset_region_length(); }
-  size_t young_cset_region_length() { return eden_cset_region_length() +
-                                             survivor_cset_region_length(); }
+  uint cset_region_length()       { return young_cset_region_length() +
+                                           old_cset_region_length(); }
+  uint young_cset_region_length() { return eden_cset_region_length() +
+                                           survivor_cset_region_length(); }
 
   void record_young_free_cset_time_ms(double time_ms) {
     _recorded_young_free_cset_time_ms = time_ms;
@@ -720,12 +720,12 @@
   // Calculate and return the minimum desired young list target
   // length. This is the minimum desired young list length according
   // to the user's inputs.
-  size_t calculate_young_list_desired_min_length(size_t base_min_length);
+  uint calculate_young_list_desired_min_length(uint base_min_length);
 
   // Calculate and return the maximum desired young list target
   // length. This is the maximum desired young list length according
   // to the user's inputs.
-  size_t calculate_young_list_desired_max_length();
+  uint calculate_young_list_desired_max_length();
 
   // Calculate and return the maximum young list target length that
   // can fit into the pause time goal. The parameters are: rs_lengths
@@ -733,18 +733,18 @@
   // be, base_min_length is the already existing number of regions in
   // the young list, min_length and max_length are the desired min and
   // max young list length according to the user's inputs.
-  size_t calculate_young_list_target_length(size_t rs_lengths,
-                                            size_t base_min_length,
-                                            size_t desired_min_length,
-                                            size_t desired_max_length);
+  uint calculate_young_list_target_length(size_t rs_lengths,
+                                          uint base_min_length,
+                                          uint desired_min_length,
+                                          uint desired_max_length);
 
   // Check whether a given young length (young_length) fits into the
   // given target pause time and whether the prediction for the amount
   // of objects to be copied for the given length will fit into the
   // given free space (expressed by base_free_regions).  It is used by
   // calculate_young_list_target_length().
-  bool predict_will_fit(size_t young_length, double base_time_ms,
-                        size_t base_free_regions, double target_pause_time_ms);
+  bool predict_will_fit(uint young_length, double base_time_ms,
+                        uint base_free_regions, double target_pause_time_ms);
 
   // Count the number of bytes used in the CS.
   void count_CS_bytes_used();
@@ -773,7 +773,7 @@
   }
 
   // This should be called after the heap is resized.
-  void record_new_heap_size(size_t new_number_of_regions);
+  void record_new_heap_size(uint new_number_of_regions);
 
   void init();
 
@@ -1048,18 +1048,18 @@
   }
 
   bool is_young_list_full() {
-    size_t young_list_length = _g1->young_list()->length();
-    size_t young_list_target_length = _young_list_target_length;
+    uint young_list_length = _g1->young_list()->length();
+    uint young_list_target_length = _young_list_target_length;
     return young_list_length >= young_list_target_length;
   }
 
   bool can_expand_young_list() {
-    size_t young_list_length = _g1->young_list()->length();
-    size_t young_list_max_length = _young_list_max_length;
+    uint young_list_length = _g1->young_list()->length();
+    uint young_list_max_length = _young_list_max_length;
     return young_list_length < young_list_max_length;
   }
 
-  size_t young_list_max_length() {
+  uint young_list_max_length() {
     return _young_list_max_length;
   }
 
@@ -1097,7 +1097,7 @@
   int _tenuring_threshold;
 
   // The limit on the number of regions allocated for survivors.
-  size_t _max_survivor_regions;
+  uint _max_survivor_regions;
 
   // For reporting purposes.
   size_t _eden_bytes_before_gc;
@@ -1105,7 +1105,7 @@
   size_t _capacity_before_gc;
 
   // The number of survivor regions after a collection.
-  size_t _recorded_survivor_regions;
+  uint _recorded_survivor_regions;
   // List of survivor regions.
   HeapRegion* _recorded_survivor_head;
   HeapRegion* _recorded_survivor_tail;
@@ -1127,9 +1127,9 @@
     return purpose == GCAllocForSurvived;
   }
 
-  static const size_t REGIONS_UNLIMITED = ~(size_t)0;
+  static const uint REGIONS_UNLIMITED = (uint) -1;
 
-  size_t max_regions(int purpose);
+  uint max_regions(int purpose);
 
   // The limit on regions for a particular purpose is reached.
   void note_alloc_region_limit_reached(int purpose) {
@@ -1146,7 +1146,7 @@
     _survivor_surv_rate_group->stop_adding_regions();
   }
 
-  void record_survivor_regions(size_t      regions,
+  void record_survivor_regions(uint regions,
                                HeapRegion* head,
                                HeapRegion* tail) {
     _recorded_survivor_regions = regions;
@@ -1154,12 +1154,11 @@
     _recorded_survivor_tail    = tail;
   }
 
-  size_t recorded_survivor_regions() {
+  uint recorded_survivor_regions() {
     return _recorded_survivor_regions;
   }
 
-  void record_thread_age_table(ageTable* age_table)
-  {
+  void record_thread_age_table(ageTable* age_table) {
     _survivors_age_table.merge_par(age_table);
   }
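
REGIONS_UNLIMITED above, like G1_NULL_HRS_INDEX later in heapRegion.hpp, is now written as (uint) -1: converting -1 to an unsigned type yields that type's maximum value, so the sentinel still compares greater than (and unequal to) any real region count. A two-assert check of that assumption:

    #include <assert.h>
    #include <limits.h>

    int main() {
      const unsigned int REGIONS_UNLIMITED = (unsigned int) -1;  // as in the patch
      assert(REGIONS_UNLIMITED == UINT_MAX);   // -1 wraps to the maximum value
      assert(123456u < REGIONS_UNLIMITED);     // any plausible count stays below it
      return 0;
    }
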
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ErgoVerbose.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -120,7 +120,7 @@
 
 // Single parameter format strings
 #define ergo_format_str(_name_)      ", " _name_ ": %s"
-#define ergo_format_region(_name_)   ", " _name_ ": "SIZE_FORMAT" regions"
+#define ergo_format_region(_name_)   ", " _name_ ": %u regions"
 #define ergo_format_byte(_name_)     ", " _name_ ": "SIZE_FORMAT" bytes"
 #define ergo_format_double(_name_)   ", " _name_ ": %1.2f"
 #define ergo_format_perc(_name_)     ", " _name_ ": %1.2f %%"
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -177,19 +177,19 @@
   // values we read here are possible (i.e., at a STW phase at the end
   // of a GC).
 
-  size_t young_list_length = g1->young_list()->length();
-  size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
+  uint young_list_length = g1->young_list()->length();
+  uint survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
   assert(young_list_length >= survivor_list_length, "invariant");
-  size_t eden_list_length = young_list_length - survivor_list_length;
+  uint eden_list_length = young_list_length - survivor_list_length;
   // Max length includes any potential extensions to the young gen
   // we'll do when the GC locker is active.
-  size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
+  uint young_list_max_length = g1->g1_policy()->young_list_max_length();
   assert(young_list_max_length >= survivor_list_length, "invariant");
-  size_t eden_list_max_length = young_list_max_length - survivor_list_length;
+  uint eden_list_max_length = young_list_max_length - survivor_list_length;
 
   _overall_used = g1->used_unlocked();
-  _eden_used = eden_list_length * HeapRegion::GrainBytes;
-  _survivor_used = survivor_list_length * HeapRegion::GrainBytes;
+  _eden_used = (size_t) eden_list_length * HeapRegion::GrainBytes;
+  _survivor_used = (size_t) survivor_list_length * HeapRegion::GrainBytes;
   _young_region_num = young_list_length;
   _old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
 
@@ -207,7 +207,7 @@
   committed -= _survivor_committed + _old_committed;
 
   // Next, calculate and remove the committed size for the eden.
-  _eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
+  _eden_committed = (size_t) eden_list_max_length * HeapRegion::GrainBytes;
   // Somewhat defensive: be robust in case there are inaccuracies in
   // the calculations
   _eden_committed = MIN2(_eden_committed, committed);
@@ -237,10 +237,10 @@
   // When a new eden region is allocated, only the eden_used size is
   // affected (since we have recalculated everything else at the last GC).
 
-  size_t young_region_num = g1h()->young_list()->length();
+  uint young_region_num = g1h()->young_list()->length();
   if (young_region_num > _young_region_num) {
-    size_t diff = young_region_num - _young_region_num;
-    _eden_used += diff * HeapRegion::GrainBytes;
+    uint diff = young_region_num - _young_region_num;
+    _eden_used += (size_t) diff * HeapRegion::GrainBytes;
     // Somewhat defensive: cap the eden used size to make sure it
     // never exceeds the committed size.
     _eden_used = MIN2(_eden_used, _eden_committed);
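
Several changes above add an explicit (size_t) cast before multiplying a uint region count by HeapRegion::GrainBytes. GrainBytes is itself declared as size_t (see the vmStructs_g1.hpp entry at the end of this patch), so the usual arithmetic conversions would widen the uint operand anyway; the cast mainly makes the widening explicit and keeps the pattern safe should the other factor ever be a 32-bit type. A small sketch, with made-up values, of the overflow the cast guards against when both factors really are 32-bit:

    #include <cstdio>
    #include <cstddef>

    int main() {
      const unsigned int grain_bytes = 8u * 1024 * 1024;   // hypothetical 8 MB regions
      const unsigned int region_num  = 1024;               // 1024 regions == 8 GB

      // Both operands are 32-bit: the product is computed modulo 2^32 and only
      // then widened to size_t, i.e. after the overflow has already happened.
      size_t wrong = (size_t) (region_num * grain_bytes);

      // Widen one operand first (what the (size_t) casts in the patch spell out),
      // so the multiplication itself is carried out in size_t.
      size_t right = (size_t) region_num * grain_bytes;

      printf("wrong = %zu bytes\n", wrong);   // prints 0
      printf("right = %zu bytes\n", right);   // prints 8589934592 on a 64-bit build
      return 0;
    }
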
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -147,7 +147,7 @@
   size_t _overall_committed;
   size_t _overall_used;
 
-  size_t _young_region_num;
+  uint   _young_region_num;
   size_t _young_gen_committed;
   size_t _eden_committed;
   size_t _eden_used;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -334,7 +334,7 @@
 
   guarantee(GrainWords == 0, "we should only set it once");
   GrainWords = GrainBytes >> LogHeapWordSize;
-  guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");
+  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");
 
   guarantee(CardsPerRegion == 0, "we should only set it once");
   CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
@@ -482,10 +482,10 @@
 #endif // _MSC_VER
 
 
-HeapRegion::
-HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
-           MemRegion mr, bool is_zeroed)
-  : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
+HeapRegion::HeapRegion(uint hrs_index,
+                       G1BlockOffsetSharedArray* sharedOffsetArray,
+                       MemRegion mr, bool is_zeroed) :
+    G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
     _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
     _in_collection_set(false),
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -52,12 +52,15 @@
 class HeapRegion;
 class HeapRegionSetBase;
 
-#define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
+#define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
 #define HR_FORMAT_PARAMS(_hr_) \
                 (_hr_)->hrs_index(), \
                 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
                 (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
 
+// sentinel value for hrs_index
+#define G1_NULL_HRS_INDEX ((uint) -1)
+
 // A dirty card to oop closure for heap regions. It
 // knows how to get the G1 heap and how to use the bitmap
 // in the concurrent marker used by G1 to filter remembered
@@ -235,7 +238,7 @@
 
  protected:
   // The index of this region in the heap region sequence.
-  size_t  _hrs_index;
+  uint  _hrs_index;
 
   HumongousType _humongous_type;
   // For a humongous region, region in which it starts.
@@ -342,7 +345,7 @@
 
  public:
   // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
-  HeapRegion(size_t hrs_index,
+  HeapRegion(uint hrs_index,
              G1BlockOffsetSharedArray* sharedOffsetArray,
              MemRegion mr, bool is_zeroed);
 
@@ -389,7 +392,7 @@
 
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
-  size_t hrs_index() const { return _hrs_index; }
+  uint hrs_index() const { return _hrs_index; }
 
   // The number of bytes marked live in the region in the last marking phase.
   size_t marked_bytes()    { return _prev_marked_bytes; }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -577,7 +577,7 @@
 #endif
 
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
-  size_t cur_hrs_ind = hr()->hrs_index();
+  size_t cur_hrs_ind = (size_t) hr()->hrs_index();
 
 #if HRRS_VERBOSE
   gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@@ -841,7 +841,7 @@
 #endif
 
   // Set the corresponding coarse bit.
-  size_t max_hrs_index = max->hr()->hrs_index();
+  size_t max_hrs_index = (size_t) max->hr()->hrs_index();
   if (!_coarse_map.at(max_hrs_index)) {
     _coarse_map.at_put(max_hrs_index, true);
     _n_coarse_entries++;
@@ -866,17 +866,20 @@
 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminate garbage regions from the coarse map.
-  if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("Scrubbing region "SIZE_FORMAT":",
-                           hr()->hrs_index());
+  if (G1RSScrubVerbose) {
+    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index());
+  }
 
   assert(_coarse_map.size() == region_bm->size(), "Precondition");
-  if (G1RSScrubVerbose)
-    gclog_or_tty->print("   Coarse map: before = %d...", _n_coarse_entries);
+  if (G1RSScrubVerbose) {
+    gclog_or_tty->print("   Coarse map: before = "SIZE_FORMAT"...",
+                        _n_coarse_entries);
+  }
   _coarse_map.set_intersection(*region_bm);
   _n_coarse_entries = _coarse_map.count_one_bits();
-  if (G1RSScrubVerbose)
-    gclog_or_tty->print_cr("   after = %d.", _n_coarse_entries);
+  if (G1RSScrubVerbose) {
+    gclog_or_tty->print_cr("   after = "SIZE_FORMAT".", _n_coarse_entries);
+  }
 
   // Now do the fine-grained maps.
   for (size_t i = 0; i < _max_fine_entries; i++) {
@@ -885,23 +888,27 @@
     while (cur != NULL) {
       PosParPRT* nxt = cur->next();
       // If the entire region is dead, eliminate.
-      if (G1RSScrubVerbose)
-        gclog_or_tty->print_cr("     For other region "SIZE_FORMAT":",
+      if (G1RSScrubVerbose) {
+        gclog_or_tty->print_cr("     For other region %u:",
                                cur->hr()->hrs_index());
-      if (!region_bm->at(cur->hr()->hrs_index())) {
+      }
+      if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
         *prev = nxt;
         cur->set_next(NULL);
         _n_fine_entries--;
-        if (G1RSScrubVerbose)
+        if (G1RSScrubVerbose) {
           gclog_or_tty->print_cr("          deleted via region map.");
+        }
         PosParPRT::free(cur);
       } else {
         // Do fine-grain elimination.
-        if (G1RSScrubVerbose)
+        if (G1RSScrubVerbose) {
           gclog_or_tty->print("          occ: before = %4d.", cur->occupied());
+        }
         cur->scrub(ctbs, card_bm);
-        if (G1RSScrubVerbose)
+        if (G1RSScrubVerbose) {
           gclog_or_tty->print_cr("          after = %4d.", cur->occupied());
+        }
         // Did that empty the table completely?
         if (cur->occupied() == 0) {
           *prev = nxt;
@@ -1003,7 +1010,7 @@
 
 void OtherRegionsTable::clear_incoming_entry(HeapRegion* from_hr) {
   MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
-  size_t hrs_ind = from_hr->hrs_index();
+  size_t hrs_ind = (size_t) from_hr->hrs_index();
   size_t ind = hrs_ind & _mod_max_fine_entries_mask;
   if (del_single_region_table(ind, from_hr)) {
     assert(!_coarse_map.at(hrs_ind), "Inv");
@@ -1011,7 +1018,7 @@
     _coarse_map.par_at_put(hrs_ind, 0);
   }
   // Check to see if any of the fcc entries come from here.
-  size_t hr_ind = hr()->hrs_index();
+  size_t hr_ind = (size_t) hr()->hrs_index();
   for (int tid = 0; tid < HeapRegionRemSet::num_par_rem_sets(); tid++) {
     int fcc_ent = _from_card_cache[tid][hr_ind];
     if (fcc_ent != -1) {
@@ -1223,7 +1230,7 @@
     if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
       _coarse_cur_region_cur_card = 0;
       HeapWord* r_bot =
-        _g1h->region_at(_coarse_cur_region_index)->bottom();
+        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
       _cur_region_card_offset = _bosa->index_for(r_bot);
     } else {
       return false;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -329,13 +329,13 @@
 
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
-  static void init_heap(size_t max_regions) {
-    OtherRegionsTable::init_from_card_cache(max_regions);
+  static void init_heap(uint max_regions) {
+    OtherRegionsTable::init_from_card_cache((size_t) max_regions);
   }
 
   // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
-  static void shrink_heap(size_t new_n_regs) {
-    OtherRegionsTable::shrink_from_card_cache(new_n_regs);
+  static void shrink_heap(uint new_n_regs) {
+    OtherRegionsTable::shrink_from_card_cache((size_t) new_n_regs);
   }
 
 #ifndef PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,16 +31,15 @@
 
 // Private
 
-size_t HeapRegionSeq::find_contiguous_from(size_t from, size_t num) {
-  size_t len = length();
+uint HeapRegionSeq::find_contiguous_from(uint from, uint num) {
+  uint len = length();
   assert(num > 1, "use this only for sequences of length 2 or greater");
   assert(from <= len,
-         err_msg("from: "SIZE_FORMAT" should be valid and <= than "SIZE_FORMAT,
-                 from, len));
+         err_msg("from: %u should be valid and <= than %u", from, len));
 
-  size_t curr = from;
-  size_t first = G1_NULL_HRS_INDEX;
-  size_t num_so_far = 0;
+  uint curr = from;
+  uint first = G1_NULL_HRS_INDEX;
+  uint num_so_far = 0;
   while (curr < len && num_so_far < num) {
     if (at(curr)->is_empty()) {
       if (first == G1_NULL_HRS_INDEX) {
@@ -60,7 +59,7 @@
     // we found enough space for the humongous object
     assert(from <= first && first < len, "post-condition");
     assert(first < curr && (curr - first) == num, "post-condition");
-    for (size_t i = first; i < first + num; ++i) {
+    for (uint i = first; i < first + num; ++i) {
       assert(at(i)->is_empty(), "post-condition");
     }
     return first;
@@ -73,10 +72,10 @@
 // Public
 
 void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
-                               size_t max_length) {
-  assert((size_t) bottom % HeapRegion::GrainBytes == 0,
+                               uint max_length) {
+  assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
          "bottom should be heap region aligned");
-  assert((size_t) end % HeapRegion::GrainBytes == 0,
+  assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
          "end should be heap region aligned");
 
   _length = 0;
@@ -88,8 +87,8 @@
   _max_length = max_length;
 
   _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length);
-  memset(_regions, 0, max_length * sizeof(HeapRegion*));
-  _regions_biased = _regions - ((size_t) bottom >> _region_shift);
+  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
+  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
 
   assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
          "bottom should be included in the region with index 0");
@@ -105,7 +104,7 @@
   assert(_heap_bottom <= next_bottom, "invariant");
   while (next_bottom < new_end) {
     assert(next_bottom < _heap_end, "invariant");
-    size_t index = length();
+    uint index = length();
 
     assert(index < _max_length, "otherwise we cannot expand further");
     if (index == 0) {
@@ -139,9 +138,9 @@
   return MemRegion(old_end, next_bottom);
 }
 
-size_t HeapRegionSeq::free_suffix() {
-  size_t res = 0;
-  size_t index = length();
+uint HeapRegionSeq::free_suffix() {
+  uint res = 0;
+  uint index = length();
   while (index > 0) {
     index -= 1;
     if (!at(index)->is_empty()) {
@@ -152,27 +151,24 @@
   return res;
 }
 
-size_t HeapRegionSeq::find_contiguous(size_t num) {
+uint HeapRegionSeq::find_contiguous(uint num) {
   assert(num > 1, "use this only for sequences of length 2 or greater");
   assert(_next_search_index <= length(),
-         err_msg("_next_search_indeex: "SIZE_FORMAT" "
-                 "should be valid and <= than "SIZE_FORMAT,
+         err_msg("_next_search_index: %u should be valid and <= than %u",
                  _next_search_index, length()));
 
-  size_t start = _next_search_index;
-  size_t res = find_contiguous_from(start, num);
+  uint start = _next_search_index;
+  uint res = find_contiguous_from(start, num);
   if (res == G1_NULL_HRS_INDEX && start > 0) {
     // Try starting from the beginning. If _next_search_index was 0,
     // no point in doing this again.
     res = find_contiguous_from(0, num);
   }
   if (res != G1_NULL_HRS_INDEX) {
-    assert(res < length(),
-           err_msg("res: "SIZE_FORMAT" should be valid", res));
+    assert(res < length(), err_msg("res: %u should be valid", res));
     _next_search_index = res + num;
     assert(_next_search_index <= length(),
-           err_msg("_next_search_indeex: "SIZE_FORMAT" "
-                   "should be valid and <= than "SIZE_FORMAT,
+           err_msg("_next_search_index: %u should be valid and <= than %u",
                    _next_search_index, length()));
   }
   return res;
@@ -183,20 +179,20 @@
 }
 
 void HeapRegionSeq::iterate_from(HeapRegion* hr, HeapRegionClosure* blk) const {
-  size_t hr_index = 0;
+  uint hr_index = 0;
   if (hr != NULL) {
-    hr_index = (size_t) hr->hrs_index();
+    hr_index = hr->hrs_index();
   }
 
-  size_t len = length();
-  for (size_t i = hr_index; i < len; i += 1) {
+  uint len = length();
+  for (uint i = hr_index; i < len; i += 1) {
     bool res = blk->doHeapRegion(at(i));
     if (res) {
       blk->incomplete();
       return;
     }
   }
-  for (size_t i = 0; i < hr_index; i += 1) {
+  for (uint i = 0; i < hr_index; i += 1) {
     bool res = blk->doHeapRegion(at(i));
     if (res) {
       blk->incomplete();
@@ -206,7 +202,7 @@
 }
 
 MemRegion HeapRegionSeq::shrink_by(size_t shrink_bytes,
-                                   size_t* num_regions_deleted) {
+                                   uint* num_regions_deleted) {
   // Reset this in case it's currently pointing into the regions that
   // we just removed.
   _next_search_index = 0;
@@ -218,7 +214,7 @@
   assert(_allocated_length > 0, "we should have at least one region committed");
 
   // around the loop, i will be the next region to be removed
-  size_t i = length() - 1;
+  uint i = length() - 1;
   assert(i > 0, "we should never remove all regions");
   // [last_start, end) is the MemRegion that covers the regions we will remove.
   HeapWord* end = at(i)->end();
@@ -249,29 +245,24 @@
 #ifndef PRODUCT
 void HeapRegionSeq::verify_optional() {
   guarantee(_length <= _allocated_length,
-            err_msg("invariant: _length: "SIZE_FORMAT" "
-                    "_allocated_length: "SIZE_FORMAT,
+            err_msg("invariant: _length: %u _allocated_length: %u",
                     _length, _allocated_length));
   guarantee(_allocated_length <= _max_length,
-            err_msg("invariant: _allocated_length: "SIZE_FORMAT" "
-                    "_max_length: "SIZE_FORMAT,
+            err_msg("invariant: _allocated_length: %u _max_length: %u",
                     _allocated_length, _max_length));
   guarantee(_next_search_index <= _length,
-            err_msg("invariant: _next_search_index: "SIZE_FORMAT" "
-                    "_length: "SIZE_FORMAT,
+            err_msg("invariant: _next_search_index: %u _length: %u",
                     _next_search_index, _length));
 
   HeapWord* prev_end = _heap_bottom;
-  for (size_t i = 0; i < _allocated_length; i += 1) {
+  for (uint i = 0; i < _allocated_length; i += 1) {
     HeapRegion* hr = _regions[i];
-    guarantee(hr != NULL, err_msg("invariant: i: "SIZE_FORMAT, i));
+    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
     guarantee(hr->bottom() == prev_end,
-              err_msg("invariant i: "SIZE_FORMAT" "HR_FORMAT" "
-                      "prev_end: "PTR_FORMAT,
+              err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                       i, HR_FORMAT_PARAMS(hr), prev_end));
     guarantee(hr->hrs_index() == i,
-              err_msg("invariant: i: "SIZE_FORMAT" hrs_index(): "SIZE_FORMAT,
-                      i, hr->hrs_index()));
+              err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
     if (i < _length) {
       // Asserts will fire if i is >= _length
       HeapWord* addr = hr->bottom();
@@ -290,8 +281,8 @@
       prev_end = hr->end();
     }
   }
-  for (size_t i = _allocated_length; i < _max_length; i += 1) {
-    guarantee(_regions[i] == NULL, err_msg("invariant i: "SIZE_FORMAT, i));
+  for (uint i = _allocated_length; i < _max_length; i += 1) {
+    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
   }
 }
 #endif // PRODUCT
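
find_contiguous_from() above is a single left-to-right scan: it counts consecutive empty regions, resets the count when a non-empty region is hit, and reports the start of the first run of the requested length (or G1_NULL_HRS_INDEX if none exists before the end). A compact sketch of the same scan over a plain bool array; the names and the sentinel mirror the patch, the data is made up:

    #include <stdio.h>

    #define G1_NULL_HRS_INDEX ((unsigned int) -1)   // same sentinel as the patch

    // First index at or after `from` where `num` consecutive slots are empty,
    // or G1_NULL_HRS_INDEX if no such run exists.
    static unsigned int find_contiguous_from(const bool* is_empty, unsigned int len,
                                             unsigned int from, unsigned int num) {
      unsigned int curr = from;
      unsigned int first = G1_NULL_HRS_INDEX;
      unsigned int num_so_far = 0;
      while (curr < len && num_so_far < num) {
        if (is_empty[curr]) {
          if (first == G1_NULL_HRS_INDEX) {
            first = curr;                 // a new candidate run starts here
            num_so_far = 1;
          } else {
            num_so_far += 1;              // the current run grows
          }
        } else {
          first = G1_NULL_HRS_INDEX;      // run broken, start over
          num_so_far = 0;
        }
        curr += 1;
      }
      return (num_so_far == num) ? first : G1_NULL_HRS_INDEX;
    }

    int main() {
      //                       0      1     2      3     4     5     6
      const bool is_empty[] = {false, true, false, true, true, true, false};
      printf("run of 3 empty regions starts at %u\n",
             find_contiguous_from(is_empty, 7, 0, 3));   // prints 3
      return 0;
    }
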
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,8 +29,6 @@
 class HeapRegionClosure;
 class FreeRegionList;
 
-#define G1_NULL_HRS_INDEX ((size_t) -1)
-
 // This class keeps track of the region metadata (i.e., HeapRegion
 // instances). They are kept in the _regions array in address
 // order. A region's index in the array corresponds to its index in
@@ -65,7 +63,7 @@
   HeapRegion** _regions_biased;
 
   // The number of regions committed in the heap.
-  size_t _length;
+  uint _length;
 
   // The address of the first reserved word in the heap.
   HeapWord* _heap_bottom;
@@ -74,32 +72,32 @@
   HeapWord* _heap_end;
 
   // The log of the region byte size.
-  size_t _region_shift;
+  uint _region_shift;
 
   // A hint for which index to start searching from for humongous
   // allocations.
-  size_t _next_search_index;
+  uint _next_search_index;
 
   // The number of regions for which we have allocated HeapRegions.
-  size_t _allocated_length;
+  uint _allocated_length;
 
   // The maximum number of regions in the heap.
-  size_t _max_length;
+  uint _max_length;
 
   // Find a contiguous set of empty regions of length num, starting
   // from the given index.
-  size_t find_contiguous_from(size_t from, size_t num);
+  uint find_contiguous_from(uint from, uint num);
 
   // Map a heap address to a biased region index. Assume that the
   // address is valid.
-  inline size_t addr_to_index_biased(HeapWord* addr) const;
+  inline uintx addr_to_index_biased(HeapWord* addr) const;
 
-  void increment_length(size_t* length) {
+  void increment_length(uint* length) {
     assert(*length < _max_length, "pre-condition");
     *length += 1;
   }
 
-  void decrement_length(size_t* length) {
+  void decrement_length(uint* length) {
     assert(*length > 0, "pre-condition");
     *length -= 1;
   }
@@ -108,11 +106,11 @@
   // Empty constructor, we'll initialize it with the initialize() method.
   HeapRegionSeq() { }
 
-  void initialize(HeapWord* bottom, HeapWord* end, size_t max_length);
+  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
 
   // Return the HeapRegion at the given index. Assume that the index
   // is valid.
-  inline HeapRegion* at(size_t index) const;
+  inline HeapRegion* at(uint index) const;
 
   // If addr is within the committed space return its corresponding
   // HeapRegion, otherwise return NULL.
@@ -123,10 +121,10 @@
   inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
 
   // Return the number of regions that have been committed in the heap.
-  size_t length() const { return _length; }
+  uint length() const { return _length; }
 
   // Return the maximum number of regions in the heap.
-  size_t max_length() const { return _max_length; }
+  uint max_length() const { return _max_length; }
 
   // Expand the sequence to reflect that the heap has grown from
   // old_end to new_end. Either create new HeapRegions, or re-use
@@ -139,12 +137,12 @@
 
   // Return the number of contiguous regions at the end of the sequence
   // that are available for allocation.
-  size_t free_suffix();
+  uint free_suffix();
 
   // Find a contiguous set of empty regions of length num and return
   // the index of the first region or G1_NULL_HRS_INDEX if the
   // search was unsuccessful.
-  size_t find_contiguous(size_t num);
+  uint find_contiguous(uint num);
 
   // Apply blk->doHeapRegion() on all committed regions in address order,
   // terminating the iteration early if doHeapRegion() returns true.
@@ -159,7 +157,7 @@
   // sequence. Return a MemRegion that corresponds to the address
   // range of the uncommitted regions. Assume shrink_bytes is page and
   // heap region aligned.
-  MemRegion shrink_by(size_t shrink_bytes, size_t* num_regions_deleted);
+  MemRegion shrink_by(size_t shrink_bytes, uint* num_regions_deleted);
 
   // Do some sanity checking.
   void verify_optional() PRODUCT_RETURN;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,11 +28,11 @@
 #include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 
-inline size_t HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
+inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
   assert(_heap_bottom <= addr && addr < _heap_end,
          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
                  addr, _heap_bottom, _heap_end));
-  size_t index = (size_t) addr >> _region_shift;
+  uintx index = (uintx) addr >> _region_shift;
   return index;
 }
 
@@ -40,7 +40,7 @@
   assert(_heap_bottom <= addr && addr < _heap_end,
          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
                  addr, _heap_bottom, _heap_end));
-  size_t index_biased = addr_to_index_biased(addr);
+  uintx index_biased = addr_to_index_biased(addr);
   HeapRegion* hr = _regions_biased[index_biased];
   assert(hr != NULL, "invariant");
   return hr;
@@ -55,7 +55,7 @@
   return NULL;
 }
 
-inline HeapRegion* HeapRegionSeq::at(size_t index) const {
+inline HeapRegion* HeapRegionSeq::at(uint index) const {
   assert(index < length(), "pre-condition");
   HeapRegion* hr = _regions[index];
   assert(hr != NULL, "sanity");
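
addr_to_index_biased() and addr_to_region_unsafe() above resolve a heap address to its HeapRegion in constant time: the address is shifted right by the region-size log and used to index _regions_biased, which initialize() in heapRegionSeq.cpp sets to the _regions base pointer pre-offset by (bottom >> shift). A minimal sketch of the same trick with plain ints and made-up sizes:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const unsigned int region_shift = 20;          // hypothetical 1 MB regions
      const uintptr_t    heap_bottom  = 0x40000000;  // hypothetical heap base
      const unsigned int num_regions  = 16;

      int regions[16];                               // stands in for HeapRegion* _regions[]
      for (unsigned int i = 0; i < num_regions; i++) {
        regions[i] = (int) i;
      }

      // Mirror of: _regions_biased = _regions - ((uintx) bottom >> _region_shift);
      // (forming this out-of-range pointer is formally undefined in ISO C++, but
      // it is exactly the shortcut the HotSpot code relies on in practice)
      int* regions_biased = regions - (heap_bottom >> region_shift);

      // Any address inside region 5 of the fake heap.
      uintptr_t addr = heap_bottom + 5 * ((uintptr_t) 1 << region_shift) + 12345;

      // Mirror of addr_to_index_biased() followed by addr_to_region_unsafe().
      uintptr_t index_biased = addr >> region_shift;
      assert(regions_biased[index_biased] == 5);

      printf("address %#lx maps to region %d\n",
             (unsigned long) addr, regions_biased[index_biased]);
      return 0;
    }

The one-time bias replaces an (addr - bottom) subtraction on every lookup.
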
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,28 +25,26 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 
-size_t HeapRegionSetBase::_unrealistically_long_length = 0;
+uint HeapRegionSetBase::_unrealistically_long_length = 0;
 HRSPhase HeapRegionSetBase::_phase = HRSPhaseNone;
 
 //////////////////// HeapRegionSetBase ////////////////////
 
-void HeapRegionSetBase::set_unrealistically_long_length(size_t len) {
+void HeapRegionSetBase::set_unrealistically_long_length(uint len) {
   guarantee(_unrealistically_long_length == 0, "should only be set once");
   _unrealistically_long_length = len;
 }
 
-size_t HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
+uint HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
   assert(hr->startsHumongous(), "pre-condition");
   assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
-  size_t region_num = hr->capacity() >> HeapRegion::LogOfHRGrainBytes;
+  uint region_num = (uint) (hr->capacity() >> HeapRegion::LogOfHRGrainBytes);
   assert(region_num > 0, "sanity");
   return region_num;
 }
 
 void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
-  msg->append("[%s] %s "
-              "ln: "SIZE_FORMAT" rn: "SIZE_FORMAT" "
-              "cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
+  msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
               name(), message, length(), region_num(),
               total_capacity_bytes(), total_used_bytes());
   fill_in_ext_msg_extra(msg);
@@ -170,13 +168,11 @@
          hrs_ext_msg(this, "verification should be in progress"));
 
   guarantee(length() == _calc_length,
-            hrs_err_msg("[%s] length: "SIZE_FORMAT" should be == "
-                        "calc length: "SIZE_FORMAT,
+            hrs_err_msg("[%s] length: %u should be == calc length: %u",
                         name(), length(), _calc_length));
 
   guarantee(region_num() == _calc_region_num,
-            hrs_err_msg("[%s] region num: "SIZE_FORMAT" should be == "
-                        "calc region num: "SIZE_FORMAT,
+            hrs_err_msg("[%s] region num: %u should be == calc region num: %u",
                         name(), region_num(), _calc_region_num));
 
   guarantee(total_capacity_bytes() == _calc_total_capacity_bytes,
@@ -211,8 +207,8 @@
   out->print_cr("    humongous         : %s", BOOL_TO_STR(regions_humongous()));
   out->print_cr("    empty             : %s", BOOL_TO_STR(regions_empty()));
   out->print_cr("  Attributes");
-  out->print_cr("    length            : "SIZE_FORMAT_W(14), length());
-  out->print_cr("    region num        : "SIZE_FORMAT_W(14), region_num());
+  out->print_cr("    length            : %14u", length());
+  out->print_cr("    region num        : %14u", region_num());
   out->print_cr("    total capacity    : "SIZE_FORMAT_W(14)" bytes",
                 total_capacity_bytes());
   out->print_cr("    total used        : "SIZE_FORMAT_W(14)" bytes",
@@ -243,14 +239,12 @@
   if (proxy_set->is_empty()) return;
 
   assert(proxy_set->length() <= _length,
-         hrs_err_msg("[%s] proxy set length: "SIZE_FORMAT" "
-                     "should be <= length: "SIZE_FORMAT,
+         hrs_err_msg("[%s] proxy set length: %u should be <= length: %u",
                      name(), proxy_set->length(), _length));
   _length -= proxy_set->length();
 
   assert(proxy_set->region_num() <= _region_num,
-         hrs_err_msg("[%s] proxy set region num: "SIZE_FORMAT" "
-                     "should be <= region num: "SIZE_FORMAT,
+         hrs_err_msg("[%s] proxy set region num: %u should be <= region num: %u",
                      name(), proxy_set->region_num(), _region_num));
   _region_num -= proxy_set->region_num();
 
@@ -369,17 +363,17 @@
   verify_optional();
 }
 
-void HeapRegionLinkedList::remove_all_pending(size_t target_count) {
+void HeapRegionLinkedList::remove_all_pending(uint target_count) {
   hrs_assert_mt_safety_ok(this);
   assert(target_count > 1, hrs_ext_msg(this, "pre-condition"));
   assert(!is_empty(), hrs_ext_msg(this, "pre-condition"));
 
   verify_optional();
-  DEBUG_ONLY(size_t old_length = length();)
+  DEBUG_ONLY(uint old_length = length();)
 
   HeapRegion* curr = _head;
   HeapRegion* prev = NULL;
-  size_t count = 0;
+  uint count = 0;
   while (curr != NULL) {
     hrs_assert_region_ok(this, curr, this);
     HeapRegion* next = curr->next();
@@ -387,7 +381,7 @@
     if (curr->pending_removal()) {
       assert(count < target_count,
              hrs_err_msg("[%s] should not come across more regions "
-                         "pending for removal than target_count: "SIZE_FORMAT,
+                         "pending for removal than target_count: %u",
                          name(), target_count));
 
       if (prev == NULL) {
@@ -422,12 +416,11 @@
   }
 
   assert(count == target_count,
-         hrs_err_msg("[%s] count: "SIZE_FORMAT" should be == "
-                     "target_count: "SIZE_FORMAT, name(), count, target_count));
+         hrs_err_msg("[%s] count: %u should be == target_count: %u",
+                     name(), count, target_count));
   assert(length() + target_count == old_length,
          hrs_err_msg("[%s] new length should be consistent "
-                     "new length: "SIZE_FORMAT" old length: "SIZE_FORMAT" "
-                     "target_count: "SIZE_FORMAT,
+                     "new length: %u old length: %u target_count: %u",
                      name(), length(), old_length, target_count));
 
   verify_optional();
@@ -444,16 +437,16 @@
   HeapRegion* curr  = _head;
   HeapRegion* prev1 = NULL;
   HeapRegion* prev0 = NULL;
-  size_t      count = 0;
+  uint        count = 0;
   while (curr != NULL) {
     verify_next_region(curr);
 
     count += 1;
     guarantee(count < _unrealistically_long_length,
-              hrs_err_msg("[%s] the calculated length: "SIZE_FORMAT" "
+              hrs_err_msg("[%s] the calculated length: %u "
                           "seems very long, is there maybe a cycle? "
                           "curr: "PTR_FORMAT" prev0: "PTR_FORMAT" "
-                          "prev1: "PTR_FORMAT" length: "SIZE_FORMAT,
+                          "prev1: "PTR_FORMAT" length: %u",
                           name(), count, curr, prev0, prev1, length()));
 
     prev1 = prev0;
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -62,20 +62,20 @@
   friend class VMStructs;
 
 protected:
-  static size_t calculate_region_num(HeapRegion* hr);
+  static uint calculate_region_num(HeapRegion* hr);
 
-  static size_t _unrealistically_long_length;
+  static uint _unrealistically_long_length;
 
   // The number of regions added to the set. If the set contains
   // only humongous regions, this reflects only 'starts humongous'
   // regions and does not include 'continues humongous' ones.
-  size_t _length;
+  uint _length;
 
   // The total number of regions represented by the set. If the set
   // does not contain humongous regions, this should be the same as
   // _length. If the set contains only humongous regions, this will
   // include the 'continues humongous' regions.
-  size_t _region_num;
+  uint _region_num;
 
   // We don't keep track of the total capacity explicitly, we instead
   // recalculate it based on _region_num and the heap region size.
@@ -86,8 +86,8 @@
   const char* _name;
 
   bool        _verify_in_progress;
-  size_t      _calc_length;
-  size_t      _calc_region_num;
+  uint        _calc_length;
+  uint        _calc_region_num;
   size_t      _calc_total_capacity_bytes;
   size_t      _calc_total_used_bytes;
 
@@ -153,18 +153,18 @@
   HeapRegionSetBase(const char* name);
 
 public:
-  static void set_unrealistically_long_length(size_t len);
+  static void set_unrealistically_long_length(uint len);
 
   const char* name() { return _name; }
 
-  size_t length() { return _length; }
+  uint length() { return _length; }
 
   bool is_empty() { return _length == 0; }
 
-  size_t region_num() { return _region_num; }
+  uint region_num() { return _region_num; }
 
   size_t total_capacity_bytes() {
-    return region_num() << HeapRegion::LogOfHRGrainBytes;
+    return (size_t) region_num() << HeapRegion::LogOfHRGrainBytes;
   }
 
   size_t total_used_bytes() { return _total_used_bytes; }
@@ -341,7 +341,7 @@
   // of regions that are pending for removal in the list, and
   // target_count should be > 1 (currently, we never need to remove a
   // single region using this).
-  void remove_all_pending(size_t target_count);
+  void remove_all_pending(uint target_count);
 
   virtual void verify();
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,15 +54,15 @@
   assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
   _length -= 1;
 
-  size_t region_num_diff;
+  uint region_num_diff;
   if (!hr->isHumongous()) {
     region_num_diff = 1;
   } else {
     region_num_diff = calculate_region_num(hr);
   }
   assert(region_num_diff <= _region_num,
-         hrs_err_msg("[%s] region's region num: "SIZE_FORMAT" "
-                     "should be <= region num: "SIZE_FORMAT,
+         hrs_err_msg("[%s] region's region num: %u "
+                     "should be <= region num: %u",
                      name(), region_num_diff, _region_num));
   _region_num -= region_num_diff;
 
--- a/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/sparsePRT.cpp	Wed Apr 18 07:21:15 2012 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -481,8 +481,7 @@
 
 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) {
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Adding card %d from region %d to region "
-                         SIZE_FORMAT" sparse.",
+  gclog_or_tty->print_cr("  Adding card %d from region %d to region %u sparse.",
                          card_index, region_id, _hr->hrs_index());
 #endif
   if (_next->occupied_entries() * 2 > _next->capacity()) {
@@ -534,7 +533,7 @@
   _next = new RSHashTable(last->capacity() * 2);
 
 #if SPARSE_PRT_VERBOSE
-  gclog_or_tty->print_cr("  Expanded sparse table for "SIZE_FORMAT" to %d.",
+  gclog_or_tty->print_cr("  Expanded sparse table for %u to %d.",
                          _hr->hrs_index(), _next->capacity());
 #endif
   for (size_t i = 0; i < last->capacity(); i++) {
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Apr 11 16:18:45 2012 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Apr 18 07:21:15 2012 -0400
@@ -34,7 +34,7 @@
   static_field(HeapRegion, GrainBytes, size_t)                                \
                                                                               \
   nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
-  nonstatic_field(HeapRegionSeq,   _length,  size_t)                          \
+  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
                                                                               \
   nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
@@ -50,8 +50,8 @@
   nonstatic_field(G1MonitoringSupport, _old_committed,      size_t)           \
   nonstatic_field(G1MonitoringSupport, _old_used,           size_t)           \
                                                                               \
-  nonstatic_field(HeapRegionSetBase,   _length,             size_t)           \
-  nonstatic_field(HeapRegionSetBase,   _region_num,         size_t)           \
+  nonstatic_field(HeapRegionSetBase,   _length,             uint)             \
+  nonstatic_field(HeapRegionSetBase,   _region_num,         uint)             \
   nonstatic_field(HeapRegionSetBase,   _total_used_bytes,   size_t)           \