Merge
author johnc
Tue, 02 Mar 2010 13:57:46 -0800
changeset 5037 0862ff87566b
parent 5032 785da8592568 (current diff)
parent 5036 7b652cd72d65 (diff)
child 5039 0ab1816a8c6d
Merge
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -46,9 +46,9 @@
 
   _processor_count = os::active_processor_count();
 
-  if (CMSConcurrentMTEnabled && (ParallelCMSThreads > 1)) {
+  if (CMSConcurrentMTEnabled && (ConcGCThreads > 1)) {
     assert(_processor_count > 0, "Processor count is suspect");
-    _concurrent_processor_count = MIN2((uint) ParallelCMSThreads,
+    _concurrent_processor_count = MIN2((uint) ConcGCThreads,
                                        (uint) _processor_count);
   } else {
     _concurrent_processor_count = 1;
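
The hunk above only renames ParallelCMSThreads to ConcGCThreads; the sizing logic is unchanged. A minimal sketch of that logic, written as a standalone function for illustration (not HotSpot code):

    // Sketch: clamp the number of concurrent GC threads to the number of
    // processors, falling back to 1 when concurrent MT mode is off.
    #include <algorithm>

    unsigned concurrent_processor_count(bool cms_concurrent_mt_enabled,
                                        unsigned conc_gc_threads,
                                        unsigned processor_count) {
      if (cms_concurrent_mt_enabled && conc_gc_threads > 1) {
        return std::min(conc_gc_threads, processor_count);  // MIN2(ConcGCThreads, _processor_count)
      }
      return 1;
    }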
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -606,7 +606,7 @@
     assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
   }
 
-  if (!_markStack.allocate(CMSMarkStackSize)) {
+  if (!_markStack.allocate(MarkStackSize)) {
     warning("Failed to allocate CMS Marking Stack");
     return;
   }
@@ -617,13 +617,13 @@
 
   // Support for multi-threaded concurrent phases
   if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) {
-    if (FLAG_IS_DEFAULT(ParallelCMSThreads)) {
+    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
       // just for now
-      FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4);
-    }
-    if (ParallelCMSThreads > 1) {
+      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4);
+    }
+    if (ConcGCThreads > 1) {
       _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads",
-                                 ParallelCMSThreads, true);
+                                 ConcGCThreads, true);
       if (_conc_workers == NULL) {
         warning("GC/CMS: _conc_workers allocation failure: "
               "forcing -CMSConcurrentMTEnabled");
@@ -634,13 +634,13 @@
     }
   }
   if (!CMSConcurrentMTEnabled) {
-    ParallelCMSThreads = 0;
+    ConcGCThreads = 0;
   } else {
     // Turn off CMSCleanOnEnter optimization temporarily for
     // the MT case where it's not fixed yet; see 6178663.
     CMSCleanOnEnter = false;
   }
-  assert((_conc_workers != NULL) == (ParallelCMSThreads > 1),
+  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
          "Inconsistency");
 
   // Parallel task queues; these are shared for the
@@ -648,7 +648,7 @@
   // are not shared with parallel scavenge (ParNew).
   {
     uint i;
-    uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads);
+    uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads);
 
     if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
          || ParallelRefProcEnabled)
@@ -3657,7 +3657,7 @@
   assert(_revisitStack.isEmpty(), "tabula rasa");
   DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());)
   bool result = false;
-  if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) {
+  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
     result = do_marking_mt(asynch);
   } else {
     result = do_marking_st(asynch);
@@ -4174,10 +4174,10 @@
 }
 
 bool CMSCollector::do_marking_mt(bool asynch) {
-  assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition");
+  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
   // In the future this would be determined ergonomically, based
   // on #cpu's, # active mutator threads (and load), and mutation rate.
-  int num_workers = ParallelCMSThreads;
+  int num_workers = ConcGCThreads;
 
   CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
   CompactibleFreeListSpace* perm_space = _permGen->cmsSpace();
@@ -6429,8 +6429,8 @@
 // For now we take the expedient path of just disabling the
 // messages for the problematic case.)
 void CMSMarkStack::expand() {
-  assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted");
-  if (_capacity == CMSMarkStackSizeMax) {
+  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
+  if (_capacity == MarkStackSizeMax) {
     if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) {
       // We print a warning message only once per CMS cycle.
       gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit");
@@ -6438,7 +6438,7 @@
     return;
   }
   // Double capacity if possible
-  size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax);
+  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
   // Do not give up existing stack until we have managed to
   // get the double capacity that we desired.
   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
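
Two behaviors in this file are easiest to see outside the diff: the ergonomic default for ConcGCThreads and the capped doubling in CMSMarkStack::expand(). A minimal sketch under those assumptions (the helper functions below are illustrative only, not part of the changeset):

    #include <algorithm>
    #include <cstddef>

    // Default chosen above when ConcGCThreads is left unset: roughly one
    // concurrent thread per four parallel GC threads, rounded up.
    size_t default_conc_gc_threads(size_t parallel_gc_threads) {
      return (parallel_gc_threads + 3) / 4;
    }

    // CMSMarkStack::expand() doubles the capacity but never past MarkStackSizeMax.
    size_t next_mark_stack_capacity(size_t capacity, size_t mark_stack_size_max) {
      if (capacity == mark_stack_size_max) {
        return capacity;  // at the limit: print the benign warning and keep the size
      }
      return std::min(capacity * 2, mark_stack_size_max);
    }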
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -44,20 +44,20 @@
 {
 
   // Ergonomically select initial concurrent refinement parameters
-  if (FLAG_IS_DEFAULT(G1ConcRefineGreenZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineGreenZone, MAX2<int>(ParallelGCThreads, 1));
+  if (FLAG_IS_DEFAULT(G1ConcRefinementGreenZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementGreenZone, MAX2<int>(ParallelGCThreads, 1));
   }
-  set_green_zone(G1ConcRefineGreenZone);
+  set_green_zone(G1ConcRefinementGreenZone);
 
-  if (FLAG_IS_DEFAULT(G1ConcRefineYellowZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineYellowZone, green_zone() * 3);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementYellowZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementYellowZone, green_zone() * 3);
   }
-  set_yellow_zone(MAX2<int>(G1ConcRefineYellowZone, green_zone()));
+  set_yellow_zone(MAX2<int>(G1ConcRefinementYellowZone, green_zone()));
 
-  if (FLAG_IS_DEFAULT(G1ConcRefineRedZone)) {
-    FLAG_SET_DEFAULT(G1ConcRefineRedZone, yellow_zone() * 2);
+  if (FLAG_IS_DEFAULT(G1ConcRefinementRedZone)) {
+    FLAG_SET_DEFAULT(G1ConcRefinementRedZone, yellow_zone() * 2);
   }
-  set_red_zone(MAX2<int>(G1ConcRefineRedZone, yellow_zone()));
+  set_red_zone(MAX2<int>(G1ConcRefinementRedZone, yellow_zone()));
   _n_worker_threads = thread_num();
   // We need one extra thread to do the young gen rset size sampling.
   _n_threads = _n_worker_threads + 1;
@@ -76,15 +76,15 @@
 }
 
 void ConcurrentG1Refine::reset_threshold_step() {
-  if (FLAG_IS_DEFAULT(G1ConcRefineThresholdStep)) {
+  if (FLAG_IS_DEFAULT(G1ConcRefinementThresholdStep)) {
     _thread_threshold_step = (yellow_zone() - green_zone()) / (worker_thread_num() + 1);
   } else {
-    _thread_threshold_step = G1ConcRefineThresholdStep;
+    _thread_threshold_step = G1ConcRefinementThresholdStep;
   }
 }
 
 int ConcurrentG1Refine::thread_num() {
-  return MAX2<int>((G1ParallelRSetThreads > 0) ? G1ParallelRSetThreads : ParallelGCThreads, 1);
+  return MAX2<int>((G1ConcRefinementThreads > 0) ? G1ConcRefinementThreads : ParallelGCThreads, 1);
 }
 
 void ConcurrentG1Refine::init() {
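
For orientation, a sketch of the ergonomic defaults the renamed G1ConcRefinement* flags feed into when all of them are left unset (illustrative struct and function, not HotSpot code):

    #include <algorithm>

    struct RefinementZones { int green; int yellow; int red; int threshold_step; };

    RefinementZones default_zones(int parallel_gc_threads, int worker_threads) {
      RefinementZones z;
      z.green  = std::max(parallel_gc_threads, 1);  // G1ConcRefinementGreenZone default
      z.yellow = std::max(z.green * 3, z.green);    // G1ConcRefinementYellowZone default
      z.red    = std::max(z.yellow * 2, z.yellow);  // G1ConcRefinementRedZone default
      // G1ConcRefinementThresholdStep default: spread thread activation
      // evenly between the green and yellow zones.
      z.threshold_step = (z.yellow - z.green) / (worker_threads + 1);
      return z;
    }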
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp	Tue Mar 02 13:57:46 2010 -0800
@@ -39,7 +39,8 @@
   * running. If the length becomes red (max queue length) the mutators start
   * processing the buffers.
   *
-  * There are some interesting cases (with G1AdaptiveConcRefine turned off):
+  * There are some interesting cases (when G1UseAdaptiveConcRefinement
+  * is turned off):
   * 1) green = yellow = red = 0. In this case the mutator will process all
   *    buffers. Except for those that are created by the deferred updates
   *    machinery during a collection.
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -107,7 +107,7 @@
     if (_should_terminate) {
       break;
     }
-    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefineServiceInterval);
+    _monitor->wait(Mutex::_no_safepoint_check_flag, G1ConcRefinementServiceIntervalMillis);
   }
 }
 
@@ -127,7 +127,7 @@
 void ConcurrentG1RefineThread::activate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-activated worker %d, on threshold %d, current %d",
                              _worker_id, _threshold, (int)dcqs.completed_buffers_num());
@@ -143,7 +143,7 @@
 void ConcurrentG1RefineThread::deactivate() {
   MutexLockerEx x(_monitor, Mutex::_no_safepoint_check_flag);
   if (_worker_id > 0) {
-    if (G1TraceConcurrentRefinement) {
+    if (G1TraceConcRefinement) {
       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
       gclog_or_tty->print_cr("G1-Refine-deactivated worker %d, off threshold %d, current %d",
                              _worker_id, _deactivation_threshold, (int)dcqs.completed_buffers_num());
@@ -218,9 +218,13 @@
 
 
 void ConcurrentG1RefineThread::yield() {
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield");
+  }
   _sts.yield("G1 refine");
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-yield-end");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-yield-end");
+  }
 }
 
 void ConcurrentG1RefineThread::stop() {
@@ -241,7 +245,9 @@
       Terminator_lock->wait();
     }
   }
-  if (G1TraceConcurrentRefinement) gclog_or_tty->print_cr("G1-Refine-stop");
+  if (G1TraceConcRefinement) {
+    gclog_or_tty->print_cr("G1-Refine-stop");
+  }
 }
 
 void ConcurrentG1RefineThread::print() const {
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -447,7 +447,7 @@
     gclog_or_tty->print_cr("[global] init, heap start = "PTR_FORMAT", "
                            "heap end = "PTR_FORMAT, _heap_start, _heap_end);
 
-  _markStack.allocate(G1MarkStackSize);
+  _markStack.allocate(MarkStackSize);
   _regionStack.allocate(G1MarkRegionStackSize);
 
   // Create & start a ConcurrentMark thread.
@@ -461,7 +461,7 @@
   assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
 
   SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
-  satb_qs.set_buffer_size(G1SATBLogBufferSize);
+  satb_qs.set_buffer_size(G1SATBBufferSize);
 
   int size = (int) MAX2(ParallelGCThreads, (size_t)1);
   _par_cleanup_thread_state = NEW_C_HEAP_ARRAY(ParCleanupThreadState*, size);
@@ -483,8 +483,8 @@
     _accum_task_vtime[i] = 0.0;
   }
 
-  if (ParallelMarkingThreads > ParallelGCThreads) {
-    vm_exit_during_initialization("Can't have more ParallelMarkingThreads "
+  if (ConcGCThreads > ParallelGCThreads) {
+    vm_exit_during_initialization("Can't have more ConcGCThreads "
                                   "than ParallelGCThreads.");
   }
   if (ParallelGCThreads == 0) {
@@ -494,11 +494,11 @@
     _sleep_factor             = 0.0;
     _marking_task_overhead    = 1.0;
   } else {
-    if (ParallelMarkingThreads > 0) {
-      // notice that ParallelMarkingThreads overwrites G1MarkingOverheadPercent
+    if (ConcGCThreads > 0) {
+      // notice that ConcGCThreads overwrites G1MarkingOverheadPercent
       // if both are set
 
-      _parallel_marking_threads = ParallelMarkingThreads;
+      _parallel_marking_threads = ConcGCThreads;
       _sleep_factor             = 0.0;
       _marking_task_overhead    = 1.0;
     } else if (G1MarkingOverheadPercent > 0) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -583,7 +583,7 @@
            res->zero_fill_state() == HeapRegion::Allocated)),
          "Non-young alloc Regions must be zero filled (and non-H)");
 
-  if (G1PrintRegions) {
+  if (G1PrintHeapRegions) {
     if (res != NULL) {
       gclog_or_tty->print_cr("new alloc region %d:["PTR_FORMAT", "PTR_FORMAT"], "
                              "top "PTR_FORMAT,
@@ -2477,7 +2477,7 @@
   if (G1SummarizeRSetStats) {
     g1_rem_set()->print_summary_info();
   }
-  if (G1SummarizeConcurrentMark) {
+  if (G1SummarizeConcMark) {
     concurrent_mark()->print_summary_info();
   }
   if (G1SummarizeZFStats) {
@@ -3480,7 +3480,7 @@
   HeapRegion* r = heap_region_containing(old);
   if (!r->evacuation_failed()) {
     r->set_evacuation_failed(true);
-    if (G1PrintRegions) {
+    if (G1PrintHeapRegions) {
       gclog_or_tty->print("evacuation failed in heap region "PTR_FORMAT" "
                           "["PTR_FORMAT","PTR_FORMAT")\n",
                           r, r->bottom(), r->end());
@@ -4002,9 +4002,7 @@
       _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
       _g1h->g1_policy()->record_termination_time(i, term_ms);
     }
-    if (G1UseSurvivorSpaces) {
-      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
-    }
+    _g1h->g1_policy()->record_thread_age_table(pss.age_table());
     _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
 
     // Clean up any par-expanded rem sets.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -270,14 +270,10 @@
   _concurrent_mark_cleanup_times_ms->add(0.20);
   _tenuring_threshold = MaxTenuringThreshold;
 
-  if (G1UseSurvivorSpaces) {
-    // if G1FixedSurvivorSpaceSize is 0 which means the size is not
-    // fixed, then _max_survivor_regions will be calculated at
-    // calculate_young_list_target_config during initialization
-    _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
-  } else {
-    _max_survivor_regions = 0;
-  }
+  // if G1FixedSurvivorSpaceSize is 0 which means the size is not
+  // fixed, then _max_survivor_regions will be calculated at
+  // calculate_young_list_target_config during initialization
+  _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
 
   initialize_all();
 }
@@ -296,28 +292,54 @@
   CollectorPolicy::initialize_flags();
 }
 
+// The easiest way to deal with the parsing of the NewSize /
+// MaxNewSize / etc. parameters is to re-use the code in the
+// TwoGenerationCollectorPolicy class. This is similar to what
+// ParallelScavenge does with its GenerationSizer class (see
+// ParallelScavengeHeap::initialize()). We might change this in the
+// future, but it's a good start.
+class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
+  size_t size_to_region_num(size_t byte_size) {
+    return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
+  }
+
+public:
+  G1YoungGenSizer() {
+    initialize_flags();
+    initialize_size_info();
+  }
+
+  size_t min_young_region_num() {
+    return size_to_region_num(_min_gen0_size);
+  }
+  size_t initial_young_region_num() {
+    return size_to_region_num(_initial_gen0_size);
+  }
+  size_t max_young_region_num() {
+    return size_to_region_num(_max_gen0_size);
+  }
+};
+
 void G1CollectorPolicy::init() {
   // Set aside an initial future to_space.
   _g1 = G1CollectedHeap::heap();
-  size_t regions = Universe::heap()->capacity() / HeapRegion::GrainBytes;
 
   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 
-  if (G1SteadyStateUsed < 50) {
-    vm_exit_during_initialization("G1SteadyStateUsed must be at least 50%.");
-  }
-
   initialize_gc_policy_counters();
 
   if (G1Gen) {
     _in_young_gc_mode = true;
 
-    if (G1YoungGenSize == 0) {
+    G1YoungGenSizer sizer;
+    size_t initial_region_num = sizer.initial_young_region_num();
+
+    if (UseAdaptiveSizePolicy) {
       set_adaptive_young_list_length(true);
       _young_list_fixed_length = 0;
     } else {
       set_adaptive_young_list_length(false);
-      _young_list_fixed_length = (G1YoungGenSize / HeapRegion::GrainBytes);
+      _young_list_fixed_length = initial_region_num;
     }
      _free_regions_at_end_of_collection = _g1->free_regions();
      _scan_only_regions_at_end_of_collection = 0;
@@ -455,7 +477,7 @@
   guarantee( adaptive_young_list_length(), "pre-condition" );
 
   double start_time_sec = os::elapsedTime();
-  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1MinReservePercent);
+  size_t min_reserve_perc = MAX2((size_t)2, (size_t)G1ReservePercent);
   min_reserve_perc = MIN2((size_t) 50, min_reserve_perc);
   size_t reserve_regions =
     (size_t) ((double) min_reserve_perc * (double) _g1->n_regions() / 100.0);
@@ -1110,10 +1132,7 @@
   size_t short_lived_so_length = _young_list_so_prefix_length;
   _short_lived_surv_rate_group->record_scan_only_prefix(short_lived_so_length);
   tag_scan_only(short_lived_so_length);
-
-  if (G1UseSurvivorSpaces) {
-    _survivors_age_table.clear();
-  }
+  _survivors_age_table.clear();
 
   assert( verify_young_ages(), "region age verification" );
 }
@@ -1432,7 +1451,7 @@
       record_concurrent_mark_init_end_pre(0.0);
 
     size_t min_used_targ =
-      (_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta);
+      (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 
     if (cur_used_bytes > min_used_targ) {
       if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) {
@@ -1916,7 +1935,7 @@
   calculate_young_list_target_config();
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
-  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSUpdatePauseFractionPercent / 100.0;
+  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
 
   // </NEW PREDICTION>
@@ -1932,7 +1951,7 @@
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
 
-  if (G1AdaptiveConcRefine) {
+  if (G1UseAdaptiveConcRefinement) {
     const int k_gy = 3, k_gr = 6;
     const double inc_k = 1.1, dec_k = 0.9;
 
@@ -2607,9 +2626,6 @@
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
-  if (!G1UseSurvivorSpaces) {
-    return;
-  }
   if (G1FixedSurvivorSpaceSize == 0) {
     _max_survivor_regions = _young_list_target_length / SurvivorRatio;
   } else {
@@ -2628,13 +2644,6 @@
 G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t
                                                                word_size) {
   assert(_g1->regions_accounted_for(), "Region leakage!");
-  // Initiate a pause when we reach the steady-state "used" target.
-  size_t used_hard = (_g1->capacity() / 100) * G1SteadyStateUsed;
-  size_t used_soft =
-   MAX2((_g1->capacity() / 100) * (G1SteadyStateUsed - G1SteadyStateUsedDelta),
-        used_hard/2);
-  size_t used = _g1->used();
-
   double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
 
   size_t young_list_length = _g1->young_list_length();
@@ -2867,7 +2876,7 @@
 // estimate of the number of live bytes.
 void G1CollectorPolicy::
 add_to_collection_set(HeapRegion* hr) {
-  if (G1PrintRegions) {
+  if (G1PrintHeapRegions) {
     gclog_or_tty->print_cr("added region to cset %d:["PTR_FORMAT", "PTR_FORMAT"], "
                   "top "PTR_FORMAT", young %s",
                   hr->hrs_index(), hr->bottom(), hr->end(),
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -88,13 +88,13 @@
     //     the time slice than what's allowed)
     //   consolidate the two entries with the minimum gap between them
     //     (this might allow less GC time than what's allowed)
-    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
-              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+    guarantee(NOT_PRODUCT(ScavengeALot ||) G1UseFixedWindowMMUTracker,
+              "array full, currently we can't recover unless +G1UseFixedWindowMMUTracker");
     // In the case where ScavengeALot is true, such overflow is not
     // uncommon; in such cases, we can, without much loss of precision
     // or performance (we are GC'ing most of the time anyway!),
     // simply overwrite the oldest entry in the tracker: this
-    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+    // is also the behaviour when G1UseFixedWindowMMUTracker is enabled.
     _head_index = trim_index(_head_index + 1);
     assert(_head_index == _tail_index, "Because we have a full circular buffer");
     _tail_index = trim_index(_tail_index + 1);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1MMUTracker.hpp	Tue Mar 02 13:57:46 2010 -0800
@@ -101,7 +101,7 @@
   // If the array is full, an easy fix is to look for the pauses with
   // the shortest gap between them and consolidate them.
   // For now, we have taken the expedient alternative of forgetting
-  // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
+  // the oldest entry in the event that +G1UseFixedWindowMMUTracker is set, thus
   // potentially violating MMU specs for some time thereafter.
 
   G1MMUTrackerQueueElem _array[QueueLength];
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -467,7 +467,7 @@
     // and they are causing failures. When we resolve said race
     // conditions, we'll revert back to parallel remembered set
     // updating and scanning. See CRs 6677707 and 6677708.
-    if (G1ParallelRSetUpdatingEnabled || (worker_i == 0)) {
+    if (G1UseParallelRSetUpdating || (worker_i == 0)) {
       updateRS(worker_i);
       scanNewRefsRS(oc, worker_i);
     } else {
@@ -476,7 +476,7 @@
       _g1p->record_update_rs_time(worker_i, 0.0);
       _g1p->record_scan_new_refs_time(worker_i, 0.0);
     }
-    if (G1ParallelRSetScanningEnabled || (worker_i == 0)) {
+    if (G1UseParallelRSetScanning || (worker_i == 0)) {
       scanRS(oc, worker_i);
     } else {
       _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Mar 02 13:57:46 2010 -0800
@@ -37,9 +37,6 @@
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-  product(uintx, G1YoungGenSize, 0,                                         \
-          "Size of the G1 young generation, 0 is the adaptive policy")      \
-                                                                            \
   develop(bool, G1Gen, true,                                                \
           "If true, it will enable the generational G1")                    \
                                                                             \
@@ -70,7 +67,7 @@
   develop(intx, G1PausesBtwnConcMark, -1,                                   \
           "If positive, fixed number of pauses between conc markings")      \
                                                                             \
-  diagnostic(bool, G1SummarizeConcurrentMark, false,                        \
+  diagnostic(bool, G1SummarizeConcMark, false,                              \
           "Summarize concurrent mark info")                                 \
                                                                             \
   diagnostic(bool, G1SummarizeRSetStats, false,                             \
@@ -85,12 +82,9 @@
   diagnostic(bool, G1SummarizeZFStats, false,                               \
           "Summarize zero-filling info")                                    \
                                                                             \
-  diagnostic(bool, G1TraceConcurrentRefinement, false,                      \
+  diagnostic(bool, G1TraceConcRefinement, false,                            \
           "Trace G1 concurrent refinement")                                 \
                                                                             \
-  product(intx, G1MarkStackSize, 2 * 1024 * 1024,                           \
-          "Size of the mark stack for concurrent marking.")                 \
-                                                                            \
   product(intx, G1MarkRegionStackSize, 1024 * 1024,                         \
           "Size of the region stack for concurrent marking.")               \
                                                                             \
@@ -100,20 +94,13 @@
   develop(intx, G1ConcZFMaxRegions, 1,                                      \
           "Stop zero-filling when # of zf'd regions reaches")               \
                                                                             \
-  product(intx, G1SteadyStateUsed, 90,                                      \
-          "If non-0, try to maintain 'used' at this pct (of max)")          \
-                                                                            \
-  product(intx, G1SteadyStateUsedDelta, 30,                                 \
-          "If G1SteadyStateUsed is non-0, then do pause this number of "    \
-          "of percentage points earlier if no marking is in progress.")     \
-                                                                            \
   develop(bool, G1SATBBarrierPrintNullPreVals, false,                       \
           "If true, count frac of ptr writes with null pre-vals.")          \
                                                                             \
-  product(intx, G1SATBLogBufferSize, 1*K,                                   \
+  product(intx, G1SATBBufferSize, 1*K,                                      \
           "Number of entries in an SATB log buffer.")                       \
                                                                             \
-  product(intx, G1SATBProcessCompletedThreshold, 20,                        \
+  develop(intx, G1SATBProcessCompletedThreshold, 20,                        \
           "Number of completed buffers that triggers log processing.")      \
                                                                             \
   develop(intx, G1ExtraRegionSurvRate, 33,                                  \
@@ -127,7 +114,7 @@
   develop(bool, G1SATBPrintStubs, false,                                    \
           "If true, print generated stubs for the SATB barrier")            \
                                                                             \
-  product(intx, G1ExpandByPercentOfAvailable, 20,                           \
+  experimental(intx, G1ExpandByPercentOfAvailable, 20,                      \
           "When expanding, % of uncommitted space to claim.")               \
                                                                             \
   develop(bool, G1RSBarrierRegionFilter, true,                              \
@@ -165,36 +152,36 @@
   product(intx, G1UpdateBufferSize, 256,                                    \
           "Size of an update buffer")                                       \
                                                                             \
-  product(intx, G1ConcRefineYellowZone, 0,                                  \
+  product(intx, G1ConcRefinementYellowZone, 0,                              \
           "Number of enqueued update buffers that will "                    \
           "trigger concurrent processing. Will be selected ergonomically "  \
           "by default.")                                                    \
                                                                             \
-  product(intx, G1ConcRefineRedZone, 0,                                     \
+  product(intx, G1ConcRefinementRedZone, 0,                                 \
           "Maximum number of enqueued update buffers before mutator "       \
           "threads start processing new ones instead of enqueueing them. "  \
           "Will be selected ergonomically by default. Zero will disable "   \
           "concurrent processing.")                                         \
                                                                             \
-  product(intx, G1ConcRefineGreenZone, 0,                                   \
+  product(intx, G1ConcRefinementGreenZone, 0,                               \
           "The number of update buffers that are left in the queue by the " \
           "concurrent processing threads. Will be selected ergonomically "  \
           "by default.")                                                    \
                                                                             \
-  product(intx, G1ConcRefineServiceInterval, 300,                           \
+  product(intx, G1ConcRefinementServiceIntervalMillis, 300,                 \
           "The last concurrent refinement thread wakes up every "           \
           "specified number of milliseconds to do miscellaneous work.")     \
                                                                             \
-  product(intx, G1ConcRefineThresholdStep, 0,                               \
+  product(intx, G1ConcRefinementThresholdStep, 0,                           \
           "Each time the rset update queue increases by this amount "       \
           "activate the next refinement thread if available. "              \
           "Will be selected ergonomically by default.")                     \
                                                                             \
-  product(intx, G1RSUpdatePauseFractionPercent, 10,                         \
+  product(intx, G1RSetUpdatingPauseTimePercent, 10,                         \
           "A target percentage of time that is allowed to be spend on "     \
           "process RS update buffers during the collection pause.")         \
                                                                             \
-  product(bool, G1AdaptiveConcRefine, true,                                 \
+  product(bool, G1UseAdaptiveConcRefinement, true,                          \
           "Select green, yellow and red zones adaptively to meet the "      \
           "the pause requirements.")                                        \
                                                                             \
@@ -245,15 +232,15 @@
           "the number of regions for which we'll print a surv rate "        \
           "summary.")                                                       \
                                                                             \
-  product(bool, G1UseScanOnlyPrefix, false,                                 \
+  develop(bool, G1UseScanOnlyPrefix, false,                                 \
           "It determines whether the system will calculate an optimum "     \
           "scan-only set.")                                                 \
                                                                             \
-  product(intx, G1MinReservePercent, 10,                                    \
+  product(intx, G1ReservePercent, 10,                                       \
           "It determines the minimum reserve we should have in the heap "   \
           "to minimize the probability of promotion failure.")              \
                                                                             \
-  diagnostic(bool, G1PrintRegions, false,                                   \
+  diagnostic(bool, G1PrintHeapRegions, false,                               \
           "If set G1 will print information on which regions are being "    \
           "allocated and which are reclaimed.")                             \
                                                                             \
@@ -263,9 +250,6 @@
   develop(bool, G1HRRSFlushLogBuffersOnVerify, false,                       \
           "Forces flushing of log buffers before verification.")            \
                                                                             \
-  product(bool, G1UseSurvivorSpaces, true,                                  \
-          "When true, use survivor space.")                                 \
-                                                                            \
   develop(bool, G1FailOnFPError, false,                                     \
           "When set, G1 will fail when it encounters an FP 'error', "       \
           "so as to allow debugging")                                       \
@@ -280,21 +264,21 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
-  product(bool, G1ForgetfulMMUTracker, false,                               \
+  product(bool, G1UseFixedWindowMMUTracker, false,                          \
           "If the MMU tracker's memory is full, forget the oldest entry")   \
                                                                             \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
-  experimental(bool, G1ParallelRSetUpdatingEnabled, false,                  \
+  experimental(bool, G1UseParallelRSetUpdating, false,                      \
           "Enables the parallelization of remembered set updating "         \
           "during evacuation pauses")                                       \
                                                                             \
-  experimental(bool, G1ParallelRSetScanningEnabled, false,                  \
+  experimental(bool, G1UseParallelRSetScanning, false,                      \
           "Enables the parallelization of remembered set scanning "         \
           "during evacuation pauses")                                       \
                                                                             \
-  product(uintx, G1ParallelRSetThreads, 0,                                  \
+  product(uintx, G1ConcRefinementThreads, 0,                                \
           "If non-0 is the number of parallel rem set update threads, "     \
           "otherwise the value is determined ergonomically.")               \
                                                                             \
--- a/hotspot/src/share/vm/includeDB_core	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/includeDB_core	Tue Mar 02 13:57:46 2010 -0800
@@ -176,6 +176,7 @@
 arguments.cpp                           oop.inline.hpp
 arguments.cpp                           os_<os_family>.inline.hpp
 arguments.cpp                           referenceProcessor.hpp
+arguments.cpp                           taskqueue.hpp
 arguments.cpp                           universe.inline.hpp
 arguments.cpp                           vm_version_<arch>.hpp
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Tue Mar 02 13:57:46 2010 -0800
@@ -1203,6 +1203,11 @@
   if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
     CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
   }
+  if (PrintGCDetails && Verbose) {
+    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
+      MarkStackSize / K, MarkStackSizeMax / K);
+    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+  }
 }
 #endif // KERNEL
 
@@ -1339,6 +1344,17 @@
   if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
     FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
   }
+
+  if (FLAG_IS_DEFAULT(MarkStackSize)) {
+    // Size as a multiple of TaskQueueSuper::N which is larger
+    // for 64-bit.
+    FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size());
+  }
+  if (PrintGCDetails && Verbose) {
+    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
+      MarkStackSize / K, MarkStackSizeMax / K);
+    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
+  }
 }
 
 void Arguments::set_heap_size() {
@@ -1737,6 +1753,11 @@
     status = false;
   }
 
+  if (UseG1GC) {
+    status = status && verify_percentage(InitiatingHeapOccupancyPercent,
+                                         "InitiatingHeapOccupancyPercent");
+  }
+
   status = status && verify_interval(RefDiscoveryPolicy,
                                      ReferenceProcessor::DiscoveryPolicyMin,
                                      ReferenceProcessor::DiscoveryPolicyMax,
@@ -1795,6 +1816,29 @@
   return false;
 }
 
+bool Arguments::parse_uintx(const char* value,
+                            uintx* uintx_arg,
+                            uintx min_size) {
+
+  // Check the sign first since atomull() parses only unsigned values.
+  bool value_is_positive = !(*value == '-');
+
+  if (value_is_positive) {
+    julong n;
+    bool good_return = atomull(value, &n);
+    if (good_return) {
+      bool above_minimum = n >= min_size;
+      bool value_is_too_large = n > max_uintx;
+
+      if (above_minimum && !value_is_too_large) {
+        *uintx_arg = n;
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
                                                   julong* long_arg,
                                                   julong min_size) {
@@ -2453,6 +2497,37 @@
       jio_fprintf(defaultStream::error_stream(),
                   "Please use -XX:YoungPLABSize in place of "
                   "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
+    } else if (match_option(option, "-XX:CMSMarkStackSize=", &tail) ||
+               match_option(option, "-XX:G1MarkStackSize=", &tail)) {
+      julong stack_size = 0;
+      ArgsRange errcode = parse_memory_size(tail, &stack_size, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid mark stack size: %s\n", option->optionString);
+        describe_range_error(errcode);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
+    } else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
+      julong max_stack_size = 0;
+      ArgsRange errcode = parse_memory_size(tail, &max_stack_size, 1);
+      if (errcode != arg_in_range) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid maximum mark stack size: %s\n",
+                    option->optionString);
+        describe_range_error(errcode);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
+    } else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
+               match_option(option, "-XX:ParallelCMSThreads=", &tail)) {
+      uintx conc_threads = 0;
+      if (!parse_uintx(tail, &conc_threads, 1)) {
+        jio_fprintf(defaultStream::error_stream(),
+                    "Invalid concurrent threads: %s\n", option->optionString);
+        return JNI_EINVAL;
+      }
+      FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
     } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
       // Skip -XX:Flags= since that case has already been handled
       if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
--- a/hotspot/src/share/vm/runtime/arguments.hpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.hpp	Tue Mar 02 13:57:46 2010 -0800
@@ -343,6 +343,12 @@
   static ArgsRange check_memory_size(julong size, julong min_size);
   static ArgsRange parse_memory_size(const char* s, julong* long_arg,
                                      julong min_size);
+  // Parse a string for an unsigned integer.  Returns true if value
+  // is an unsigned integer greater than or equal to the minimum
+  // parameter passed and returns the value in uintx_arg.  Returns
+  // false otherwise, with uintx_arg undefined.
+  static bool parse_uintx(const char* value, uintx* uintx_arg,
+                          uintx min_size);
 
   // methods to build strings from individual args
   static void build_jvm_args(const char* arg);
--- a/hotspot/src/share/vm/runtime/globals.hpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Tue Mar 02 13:57:46 2010 -0800
@@ -1245,9 +1245,6 @@
   product(uintx, ParallelGCThreads, 0,                                      \
           "Number of parallel threads parallel gc will use")                \
                                                                             \
-  product(uintx, ParallelCMSThreads, 0,                                     \
-          "Max number of threads CMS will use for concurrent work")         \
-                                                                            \
   develop(bool, ParallelOldGCSplitALot, false,                              \
           "Provoke splitting (copying data from a young gen space to"       \
           "multiple destination spaces)")                                   \
@@ -1258,8 +1255,8 @@
   develop(bool, TraceRegionTasksQueuing, false,                             \
           "Trace the queuing of the region tasks")                          \
                                                                             \
-  product(uintx, ParallelMarkingThreads, 0,                                 \
-          "Number of marking threads concurrent gc will use")               \
+  product(uintx, ConcGCThreads, 0,                                          \
+          "Number of threads concurrent gc will use")                       \
                                                                             \
   product(uintx, YoungPLABSize, 4096,                                       \
           "Size of young gen promotion labs (in HeapWords)")                \
@@ -1535,11 +1532,11 @@
   develop(bool, CMSOverflowEarlyRestoration, false,                         \
           "Whether preserved marks should be restored early")               \
                                                                             \
-  product(uintx, CMSMarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),           \
-          "Size of CMS marking stack")                                      \
-                                                                            \
-  product(uintx, CMSMarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),       \
-          "Max size of CMS marking stack")                                  \
+  product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),              \
+          "Size of marking stack")                                          \
+                                                                            \
+  product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),          \
+          "Max size of marking stack")                                      \
                                                                             \
   notproduct(bool, CMSMarkStackOverflowALot, false,                         \
           "Whether we should simulate frequent marking stack / work queue"  \
@@ -1724,6 +1721,13 @@
           "Percentage CMS generation occupancy to start a CMS collection "  \
           "cycle. A negative value means that CMSTriggerRatio is used")     \
                                                                             \
+  product(uintx, InitiatingHeapOccupancyPercent, 45,                        \
+          "Percentage of the (entire) heap occupancy to start a "           \
+          "concurrent GC cycle. It us used by GCs that trigger a "          \
+          "concurrent GC cycle based on the occupancy of the entire heap, " \
+          "not just one of the generations (e.g., G1). A value of 0 "       \
+          "denotes 'do constant GC cycles'.")                               \
+                                                                            \
   product(intx, CMSInitiatingPermOccupancyFraction, -1,                     \
           "Percentage CMS perm generation occupancy to start a "            \
           "CMScollection cycle. A negative value means that "               \
--- a/hotspot/src/share/vm/utilities/taskqueue.hpp	Mon Mar 01 12:12:35 2010 -0800
+++ b/hotspot/src/share/vm/utilities/taskqueue.hpp	Tue Mar 02 13:57:46 2010 -0800
@@ -133,6 +133,9 @@
   // Maximum number of elements allowed in the queue.  This is two less
   // than the actual queue size, for somewhat complicated reasons.
   uint max_elems() { return N - 2; }
+
+  // Total size of queue.
+  static const uint total_size() { return N; }
 };
 
 template<class E> class GenericTaskQueue: public TaskQueueSuper {
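
The new total_size() accessor is what lets arguments.cpp scale the default MarkStackSize with the task-queue capacity N, which is larger on 64-bit builds. A minimal sketch of that relationship (the constant below is a hypothetical stand-in for TaskQueueSuper::total_size(), not the real value):

    #include <cstddef>

    // Hypothetical stand-in for TaskQueueSuper::total_size(), i.e. N.
    const size_t kQueueCapacity = 16 * 1024;

    size_t default_g1_mark_stack_size() {
      // Mirrors FLAG_SET_DEFAULT(MarkStackSize, 128 * TaskQueueSuper::total_size())
      return 128 * kQueueCapacity;
    }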