8000351: Tenuring threshold should be unsigned
author jwilhelm
Wed, 03 Oct 2012 20:31:41 +0200
changeset 13925 37f75ba502b1
parent 13924 159131321ed4
child 13926 289339903fcf
child 13927 332925e36741
8000351: Tenuring threshold should be unsigned
Summary: Change the flags and variables related to tenuring threshold to be unsigned
Reviewed-by: jmasa, johnc
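
Background for the change: the object age kept in the mark word and the tenuring threshold it is compared against are both small, non-negative quantities, so carrying them in signed ints only invites sign-compare warnings and accidental sign extension. Below is a minimal standalone sketch of the comparison this patch makes consistently unsigned. It is illustrative only and not part of the changeset; age_bits, should_tenure and the threshold value are assumed names/values, not HotSpot's.

#include <cstdint>
#include <iostream>

// Illustrative only: HotSpot keeps the object age in a few bits of the
// mark word; both the age and the threshold are inherently non-negative.
const unsigned int age_bits = 4;                 // assumption for this sketch
const unsigned int max_age  = (1u << age_bits) - 1;

// With both operands unsigned, the comparison cannot mix signedness and
// cannot be fooled by sign extension of a negative value.
bool should_tenure(unsigned int object_age, unsigned int tenuring_threshold) {
  return object_age >= tenuring_threshold;       // promote once old enough
}

int main() {
  unsigned int threshold = 7;                    // e.g. a default-style initial threshold
  for (unsigned int age = 0; age <= max_age; age++) {
    std::cout << "age " << age
              << (should_tenure(age, threshold) ? " -> tenure" : " -> keep in young")
              << '\n';
  }
  return 0;
}
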
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp
hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp
hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp
hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp
hotspot/src/share/vm/memory/defNewGeneration.hpp
hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp
hotspot/src/share/vm/oops/markOop.hpp
hotspot/src/share/vm/oops/oop.hpp
hotspot/src/share/vm/oops/oop.inline.hpp
hotspot/src/share/vm/runtime/arguments.cpp
hotspot/src/share/vm/runtime/biasedLocking.cpp
hotspot/src/share/vm/runtime/globals.hpp
hotspot/src/share/vm/runtime/vmStructs.cpp
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -1195,9 +1195,9 @@
   set_promo_size(desired_promo_size);
 }
 
-int CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
+uint CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
                                              bool is_survivor_overflow,
-                                             int tenuring_threshold,
+                                             uint tenuring_threshold,
                                              size_t survivor_limit) {
   assert(survivor_limit >= generation_alignment(),
          "survivor_limit too small");
@@ -1315,7 +1315,7 @@
 
     gclog_or_tty->print( "  avg_promoted_padded_avg: %f"
                 "  avg_pretenured_padded_avg: %f"
-                "  tenuring_thresh: %d"
+                "  tenuring_thresh: %u"
                 "  target_size: " SIZE_FORMAT
                 "  survivor_limit: " SIZE_FORMAT,
                 gch->gc_stats(1)->avg_promoted()->padded_average(),
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -440,9 +440,9 @@
                                                    size_t max_eden_size);
   // Calculates new survivor space size;  returns a new tenuring threshold
   // value. Stores new survivor size in _survivor_size.
-  virtual int compute_survivor_space_size_and_threshold(
+  virtual uint compute_survivor_space_size_and_threshold(
                                                 bool   is_survivor_overflow,
-                                                int    tenuring_threshold,
+                                                uint   tenuring_threshold,
                                                 size_t survivor_limit);
 
   virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -840,8 +840,8 @@
   //
 
   // Current tenuring threshold, set to 0 if the collector reaches the
-  // maximum amount of suvivors regions.
-  int _tenuring_threshold;
+  // maximum amount of survivors regions.
+  uint _tenuring_threshold;
 
   // The limit on the number of regions allocated for survivors.
   uint _max_survivor_regions;
@@ -851,7 +851,7 @@
   size_t _survivor_bytes_before_gc;
   size_t _capacity_before_gc;
 
-  // The amount of survor regions after a collection.
+  // The amount of survivor regions after a collection.
   uint _recorded_survivor_regions;
   // List of survivor regions.
   HeapRegion* _recorded_survivor_head;
@@ -862,7 +862,7 @@
 public:
 
   inline GCAllocPurpose
-    evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
+    evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
       if (age < _tenuring_threshold && src_region->is_young()) {
         return GCAllocForSurvived;
       } else {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -941,9 +941,9 @@
   return promo_heap_delta;
 }
 
-int PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
+uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
                                              bool is_survivor_overflow,
-                                             int tenuring_threshold,
+                                             uint tenuring_threshold,
                                              size_t survivor_limit) {
   assert(survivor_limit >= _intra_generation_alignment,
          "survivor_limit too small");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -353,9 +353,9 @@
 
   // Calculates new survivor space size;  returns a new tenuring threshold
   // value. Stores new survivor size in _survivor_size.
-  int compute_survivor_space_size_and_threshold(bool   is_survivor_overflow,
-                                                int    tenuring_threshold,
-                                                size_t survivor_limit);
+  uint compute_survivor_space_size_and_threshold(bool   is_survivor_overflow,
+                                                 uint    tenuring_threshold,
+                                                 size_t survivor_limit);
 
   // Return the maximum size of a survivor space if the young generation were of
   // size gen_size.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -85,7 +85,7 @@
 
     if (!promote_immediately) {
       // Find the objects age, MT safe.
-      int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
         test_mark->displaced_mark_helper()->age() : test_mark->age();
 
       // Try allocating obj in to-space (unless too old)
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -59,7 +59,7 @@
 ReferenceProcessor*        PSScavenge::_ref_processor = NULL;
 CardTableExtension*        PSScavenge::_card_table = NULL;
 bool                       PSScavenge::_survivor_overflow = false;
-int                        PSScavenge::_tenuring_threshold = 0;
+uint                       PSScavenge::_tenuring_threshold = 0;
 HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
 elapsedTimer               PSScavenge::_accumulated_time;
 Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
@@ -529,7 +529,7 @@
 
        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
-         gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
+         gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -66,14 +66,14 @@
   static PSIsAliveClosure    _is_alive_closure;     // Closure used for reference processing
   static CardTableExtension* _card_table;           // We cache the card table for fast access.
   static bool                _survivor_overflow;    // Overflow this collection
-  static int                 _tenuring_threshold;   // tenuring threshold for next scavenge
+  static uint                _tenuring_threshold;   // tenuring threshold for next scavenge
   static elapsedTimer        _accumulated_time;     // total time spent on scavenge
   static HeapWord*           _young_generation_boundary; // The lowest address possible for the young_gen.
                                                          // This is used to decide if an oop should be scavenged,
                                                          // cards should be marked, etc.
   static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
   static Stack<oop, mtGC>     _preserved_oop_stack;  // List of oops that need their mark restored.
-  static CollectorCounters*      _counters;         // collector performance counters
+  static CollectorCounters*      _counters;          // collector performance counters
   static bool                    _promotion_failed;
 
   static void clean_up_failed_promotion();
@@ -88,7 +88,7 @@
 
  public:
   // Accessors
-  static int              tenuring_threshold()  { return _tenuring_threshold; }
+  static uint             tenuring_threshold()  { return _tenuring_threshold; }
   static elapsedTimer*    accumulated_time()    { return &_accumulated_time; }
   static bool             promotion_failed()    { return _promotion_failed; }
   static int              consecutive_skipped_scavenges()
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -642,7 +642,7 @@
 
 bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
                                             outputStream* st,
-                                            int tenuring_threshold_arg) const {
+                                            uint tenuring_threshold_arg) const {
   if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
     return false;
   }
@@ -663,7 +663,7 @@
     assert(!tenuring_threshold_change(), "(no change was attempted)");
   }
   if (tenuring_threshold_changed) {
-    st->print_cr("%d", tenuring_threshold_arg);
+    st->print_cr("%u", tenuring_threshold_arg);
   }
   return true;
 }
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -489,8 +489,8 @@
 
   // Printing support
   virtual bool print_adaptive_size_policy_on(outputStream* st) const;
-  bool print_adaptive_size_policy_on(outputStream* st, int
-                                  tenuring_threshold) const;
+  bool print_adaptive_size_policy_on(outputStream* st,
+                                     uint tenuring_threshold) const;
 };
 
 // Class that can be used to print information about the
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -78,10 +78,10 @@
   }
 }
 
-int ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
+uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
   size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
   size_t total = 0;
-  int age = 1;
+  uint age = 1;
   assert(sizes[0] == 0, "no objects with age zero should be recorded");
   while (age < table_size) {
     total += sizes[age];
@@ -90,13 +90,13 @@
     if (total > desired_survivor_size) break;
     age++;
   }
-  int result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
+  uint result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
 
   if (PrintTenuringDistribution || UsePerfData) {
 
     if (PrintTenuringDistribution) {
       gclog_or_tty->cr();
-      gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
+      gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %u (max %u)",
         desired_survivor_size*oopSize, result, MaxTenuringThreshold);
     }
 
@@ -106,7 +106,7 @@
       total += sizes[age];
       if (sizes[age] > 0) {
         if (PrintTenuringDistribution) {
-          gclog_or_tty->print_cr("- age %3d: %10ld bytes, %10ld total",
+          gclog_or_tty->print_cr("- age %3u: %10ld bytes, %10ld total",
             age, sizes[age]*oopSize, total*oopSize);
         }
       }
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -55,7 +55,7 @@
 
   // add entry
   void add(oop p, size_t oop_size) {
-    int age = p->age();
+    uint age = p->age();
     assert(age > 0 && age < table_size, "invalid age of object");
     sizes[age] += oop_size;
   }
@@ -66,7 +66,7 @@
   void merge_par(ageTable* subTable);
 
   // calculate new tenuring threshold based on age information
-  int compute_tenuring_threshold(size_t survivor_capacity);
+  uint compute_tenuring_threshold(size_t survivor_capacity);
 
  private:
   PerfVariable* _perf_sizes[table_size];
--- a/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -188,7 +188,7 @@
   inline void update_survivor_overflowed(bool survivor_overflowed) {
     _survivor_overflowed_counter->set_value(survivor_overflowed);
   }
-  inline void update_tenuring_threshold(int threshold) {
+  inline void update_tenuring_threshold(uint threshold) {
     tenuring_threshold()->set_value(threshold);
   }
   inline void update_increment_tenuring_threshold_for_gc_cost() {
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -43,7 +43,7 @@
 
 protected:
   Generation* _next_gen;
-  int         _tenuring_threshold;   // Tenuring threshold for next collection.
+  uint        _tenuring_threshold;   // Tenuring threshold for next collection.
   ageTable    _age_table;
   // Size of object to pretenure in words; command line provides bytes
   size_t        _pretenure_size_threshold_words;
@@ -325,7 +325,7 @@
                                 bool parallel = false);
 
   oop copy_to_survivor_space(oop old);
-  int tenuring_threshold() { return _tenuring_threshold; }
+  uint tenuring_threshold() { return _tenuring_threshold; }
 
   // Performance Counter support
   void update_counters();
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -35,7 +35,7 @@
 // the threads for allocation.
 //            It is thread-private at any time, but maybe multiplexed over
 //            time across multiple threads. The park()/unpark() pair is
-//            used to make it avaiable for such multiplexing.
+//            used to make it available for such multiplexing.
 class ThreadLocalAllocBuffer: public CHeapObj<mtThread> {
   friend class VMStructs;
 private:
--- a/hotspot/src/share/vm/oops/markOop.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/oops/markOop.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -318,7 +318,7 @@
     intptr_t tmp = (intptr_t) monitor;
     return (markOop) (tmp | monitor_value);
   }
-  static markOop encode(JavaThread* thread, int age, int bias_epoch) {
+  static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
     intptr_t tmp = (intptr_t) thread;
     assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
     assert(age <= max_age, "age too large");
@@ -333,10 +333,10 @@
   markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }
   markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }
 
-  int     age()               const { return mask_bits(value() >> age_shift, age_mask); }
-  markOop set_age(int v) const {
+  uint    age()               const { return mask_bits(value() >> age_shift, age_mask); }
+  markOop set_age(uint v) const {
     assert((v & ~age_mask) == 0, "shouldn't overflow age field");
-    return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
+    return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
   }
   markOop incr_age()          const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
 
--- a/hotspot/src/share/vm/oops/oop.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/oops/oop.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -327,7 +327,7 @@
   oop forwardee() const;
 
   // Age of object during scavenge
-  int age() const;
+  uint age() const;
   void incr_age();
 
   // Adjust all pointers in this object to point at it's forwarded location and
--- a/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -693,7 +693,7 @@
 }
 
 // The following method needs to be MT safe.
-inline int oopDesc::age() const {
+inline uint oopDesc::age() const {
   assert(!is_forwarded(), "Attempt to read age from forwarded mark");
   if (has_displaced_mark()) {
     return displaced_mark()->age();
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -1112,7 +1112,7 @@
     // AlwaysTenure flag should make ParNew promote all at first collection.
     // See CR 6362902.
     if (AlwaysTenure) {
-      FLAG_SET_CMDLINE(intx, MaxTenuringThreshold, 0);
+      FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0);
     }
     // When using compressed oops, we use local overflow stacks,
     // rather than using a global overflow list chained through
@@ -1231,7 +1231,7 @@
   // promote all objects surviving "tenuring_default" scavenges.
   if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
       FLAG_IS_DEFAULT(SurvivorRatio)) {
-    FLAG_SET_ERGO(intx, MaxTenuringThreshold, tenuring_default);
+    FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
   }
   // If we decided above (or user explicitly requested)
   // `promote all' (via MaxTenuringThreshold := 0),
--- a/hotspot/src/share/vm/runtime/biasedLocking.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/runtime/biasedLocking.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -154,7 +154,7 @@
     return BiasedLocking::NOT_BIASED;
   }
 
-  int age = mark->age();
+  uint age = mark->age();
   markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
   markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Wed Oct 03 20:31:41 2012 +0200
@@ -473,7 +473,7 @@
   develop(bool, CleanChunkPoolAsync, falseInEmbedded,                       \
           "Whether to clean the chunk pool asynchronously")                 \
                                                                             \
-  /* Temporary: See 6948537 */                                             \
+  /* Temporary: See 6948537 */                                              \
   experimental(bool, UseMemSetInBOT, true,                                  \
           "(Unstable) uses memset in BOT updates in GC code")               \
                                                                             \
@@ -1626,7 +1626,7 @@
           "Use BinaryTreeDictionary as default in the CMS generation")      \
                                                                             \
   product(uintx, CMSIndexedFreeListReplenish, 4,                            \
-          "Replenish an indexed free list with this number of chunks")     \
+          "Replenish an indexed free list with this number of chunks")      \
                                                                             \
   product(bool, CMSReplenishIntermediate, true,                             \
           "Replenish all intermediate free-list caches")                    \
@@ -2052,7 +2052,7 @@
   product(uintx, TenuredGenerationSizeSupplementDecay, 2,                   \
           "Decay factor to TenuredGenerationSizeIncrement")                 \
                                                                             \
-  product(uintx, MaxGCPauseMillis, max_uintx,                           \
+  product(uintx, MaxGCPauseMillis, max_uintx,                               \
           "Adaptive size policy maximum GC pause time goal in msec, "       \
           "or (G1 Only) the max. GC time per MMU time slice")               \
                                                                             \
@@ -2266,7 +2266,7 @@
   develop(bool, TraceGCTaskQueue, false,                                    \
           "Trace actions of the GC task queues")                            \
                                                                             \
-  diagnostic(bool, TraceGCTaskThread, false,                                   \
+  diagnostic(bool, TraceGCTaskThread, false,                                \
           "Trace actions of the GC task threads")                           \
                                                                             \
   product(bool, PrintParallelOldGCPhaseTimes, false,                        \
@@ -2781,7 +2781,7 @@
   product(intx, SafepointTimeoutDelay, 10000,                               \
           "Delay in milliseconds for option SafepointTimeout")              \
                                                                             \
-  product(intx, NmethodSweepFraction, 16,                                    \
+  product(intx, NmethodSweepFraction, 16,                                   \
           "Number of invocations of sweeper to cover all nmethods")         \
                                                                             \
   product(intx, NmethodSweepCheckInterval, 5,                               \
@@ -2904,7 +2904,7 @@
           "if non-zero, start verifying C heap after Nth call to "          \
           "malloc/realloc/free")                                            \
                                                                             \
-  product(intx, TypeProfileWidth,     2,                                   \
+  product(intx, TypeProfileWidth,     2,                                    \
           "number of receiver types to record in call/cast profile")        \
                                                                             \
   develop(intx, BciProfileWidth,      2,                                    \
@@ -3012,10 +3012,10 @@
   product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K),                \
           "Min change in heap space due to GC (in bytes)")                  \
                                                                             \
-  product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K),             \
+  product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K),            \
           "Min expansion of permanent heap (in bytes)")                     \
                                                                             \
-  product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M),               \
+  product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M),              \
           "Max expansion of permanent heap without full GC (in bytes)")     \
                                                                             \
   product(intx, QueuedAllocationWarningCount, 0,                            \
@@ -3028,10 +3028,10 @@
   diagnostic(intx, VerifyGCLevel,     0,                                    \
           "Generation level at which to start +VerifyBefore/AfterGC")       \
                                                                             \
-  product(intx, MaxTenuringThreshold,    15,                                \
+  product(uintx, MaxTenuringThreshold,    15,                               \
           "Maximum value for tenuring threshold")                           \
                                                                             \
-  product(intx, InitialTenuringThreshold,     7,                            \
+  product(uintx, InitialTenuringThreshold,    7,                            \
           "Initial value for tenuring threshold")                           \
                                                                             \
   product(intx, TargetSurvivorRatio,    50,                                 \
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Oct 03 08:08:52 2012 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Oct 03 20:31:41 2012 +0200
@@ -508,7 +508,7 @@
   nonstatic_field(ContiguousSpace,             _saved_mark_word,                              HeapWord*)                             \
                                                                                                                                      \
   nonstatic_field(DefNewGeneration,            _next_gen,                                     Generation*)                           \
-  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           int)                                   \
+  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                   \
   nonstatic_field(DefNewGeneration,            _age_table,                                    ageTable)                              \
   nonstatic_field(DefNewGeneration,            _eden_space,                                   EdenSpace*)                            \
   nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \