changeset:   11173:af2bc14f35f8
parent:      11164:8273870ef2ca
parent:      11172:f720721985ba
child:       11174:fccee5238e70
user:        tonyp
date:        Tue, 22 Nov 2011 04:47:10 -0500
summary:     Merge

--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Nov 22 04:47:10 2011 -0500
@@ -44,7 +44,7 @@
 //
 // CMS Bit Map Wrapper
 
-CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter):
+CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
   _bm((uintptr_t*)NULL,0),
   _shifter(shifter) {
   _bmStartWord = (HeapWord*)(rs.base());
@@ -1530,10 +1530,42 @@
                              FreeRegionList* local_cleanup_list,
                              OldRegionSet* old_proxy_set,
                              HumongousRegionSet* humongous_proxy_set,
-                             HRRSCleanupTask* hrrs_cleanup_task);
+                             HRRSCleanupTask* hrrs_cleanup_task) :
+    _g1(g1), _worker_num(worker_num),
+    _max_live_bytes(0), _regions_claimed(0),
+    _freed_bytes(0),
+    _claimed_region_time(0.0), _max_region_time(0.0),
+    _local_cleanup_list(local_cleanup_list),
+    _old_proxy_set(old_proxy_set),
+    _humongous_proxy_set(humongous_proxy_set),
+    _hrrs_cleanup_task(hrrs_cleanup_task) { }
+
   size_t freed_bytes() { return _freed_bytes; }
 
-  bool doHeapRegion(HeapRegion *r);
+  bool doHeapRegion(HeapRegion *hr) {
+    // We use a claim value of zero here because all regions
+    // were claimed with value 1 in the FinalCount task.
+    hr->reset_gc_time_stamp();
+    if (!hr->continuesHumongous()) {
+      double start = os::elapsedTime();
+      _regions_claimed++;
+      hr->note_end_of_marking();
+      _max_live_bytes += hr->max_live_bytes();
+      _g1->free_region_if_empty(hr,
+                                &_freed_bytes,
+                                _local_cleanup_list,
+                                _old_proxy_set,
+                                _humongous_proxy_set,
+                                _hrrs_cleanup_task,
+                                true /* par */);
+      double region_time = (os::elapsedTime() - start);
+      _claimed_region_time += region_time;
+      if (region_time > _max_region_time) {
+        _max_region_time = region_time;
+      }
+    }
+    return false;
+  }
 
   size_t max_live_bytes() { return _max_live_bytes; }
   size_t regions_claimed() { return _regions_claimed; }
@@ -1644,47 +1676,6 @@
 
 };
 
-G1NoteEndOfConcMarkClosure::
-G1NoteEndOfConcMarkClosure(G1CollectedHeap* g1,
-                           int worker_num,
-                           FreeRegionList* local_cleanup_list,
-                           OldRegionSet* old_proxy_set,
-                           HumongousRegionSet* humongous_proxy_set,
-                           HRRSCleanupTask* hrrs_cleanup_task)
-  : _g1(g1), _worker_num(worker_num),
-    _max_live_bytes(0), _regions_claimed(0),
-    _freed_bytes(0),
-    _claimed_region_time(0.0), _max_region_time(0.0),
-    _local_cleanup_list(local_cleanup_list),
-    _old_proxy_set(old_proxy_set),
-    _humongous_proxy_set(humongous_proxy_set),
-    _hrrs_cleanup_task(hrrs_cleanup_task) { }
-
-bool G1NoteEndOfConcMarkClosure::doHeapRegion(HeapRegion *hr) {
-  // We use a claim value of zero here because all regions
-  // were claimed with value 1 in the FinalCount task.
-  hr->reset_gc_time_stamp();
-  if (!hr->continuesHumongous()) {
-    double start = os::elapsedTime();
-    _regions_claimed++;
-    hr->note_end_of_marking();
-    _max_live_bytes += hr->max_live_bytes();
-    _g1->free_region_if_empty(hr,
-                              &_freed_bytes,
-                              _local_cleanup_list,
-                              _old_proxy_set,
-                              _humongous_proxy_set,
-                              _hrrs_cleanup_task,
-                              true /* par */);
-    double region_time = (os::elapsedTime() - start);
-    _claimed_region_time += region_time;
-    if (region_time > _max_region_time) {
-      _max_region_time = region_time;
-    }
-  }
-  return false;
-}
-
 void ConcurrentMark::cleanup() {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
@@ -1991,16 +1982,12 @@
 class G1CMParKeepAliveAndDrainClosure: public OopClosure {
   ConcurrentMark*  _cm;
   CMTask*          _task;
-  CMBitMap*        _bitMap;
   int              _ref_counter_limit;
   int              _ref_counter;
  public:
-  G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm,
-                                  CMTask* task,
-                                  CMBitMap* bitMap) :
-    _cm(cm), _task(task), _bitMap(bitMap),
-    _ref_counter_limit(G1RefProcDrainInterval)
-  {
+  G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm, CMTask* task) :
+    _cm(cm), _task(task),
+    _ref_counter_limit(G1RefProcDrainInterval) {
     assert(_ref_counter_limit > 0, "sanity");
     _ref_counter = _ref_counter_limit;
   }
@@ -2091,19 +2078,16 @@
 private:
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
-  CMBitMap*        _bitmap;
   WorkGang*        _workers;
   int              _active_workers;
 
 public:
   G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
                         ConcurrentMark* cm,
-                        CMBitMap* bitmap,
                         WorkGang* workers,
                         int n_workers) :
-    _g1h(g1h), _cm(cm), _bitmap(bitmap),
-    _workers(workers), _active_workers(n_workers)
-  { }
+    _g1h(g1h), _cm(cm),
+    _workers(workers), _active_workers(n_workers) { }
 
   // Executes the given task using concurrent marking worker threads.
   virtual void execute(ProcessTask& task);
@@ -2115,21 +2099,18 @@
   ProcessTask&     _proc_task;
   G1CollectedHeap* _g1h;
   ConcurrentMark*  _cm;
-  CMBitMap*        _bitmap;
 
 public:
   G1CMRefProcTaskProxy(ProcessTask& proc_task,
                      G1CollectedHeap* g1h,
-                     ConcurrentMark* cm,
-                     CMBitMap* bitmap) :
+                     ConcurrentMark* cm) :
     AbstractGangTask("Process reference objects in parallel"),
-    _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
-  {}
+    _proc_task(proc_task), _g1h(g1h), _cm(cm) { }
 
   virtual void work(int i) {
     CMTask* marking_task = _cm->task(i);
     G1CMIsAliveClosure g1_is_alive(_g1h);
-    G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
+    G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task);
     G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
 
     _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
@@ -2139,7 +2120,7 @@
 void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
+  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm);
 
   // We need to reset the phase for each task execution so that
   // the termination protocol of CMTask::do_marking_step works.
@@ -2156,8 +2137,7 @@
 public:
   G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
     AbstractGangTask("Enqueue reference objects in parallel"),
-    _enq_task(enq_task)
-  { }
+    _enq_task(enq_task) { }
 
   virtual void work(int i) {
     _enq_task.work(i);
@@ -2210,7 +2190,7 @@
     int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
     active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
 
-    G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
+    G1CMRefProcTaskExecutor par_task_executor(g1h, this,
                                               g1h->workers(), active_workers);
 
     if (rp->processing_is_mt()) {
@@ -3064,12 +3044,13 @@
     g1h->collection_set_iterate(&cmplt);
     if (cmplt.completed()) break;
   }
+
+  ClearMarksInHRClosure clr(nextMarkBitMap());
+  g1h->collection_set_iterate(&clr);
+
   double end_time = os::elapsedTime();
   double elapsed_time_ms = (end_time - start) * 1000.0;
   g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);
-
-  ClearMarksInHRClosure clr(nextMarkBitMap());
-  g1h->collection_set_iterate(&clr);
 }
 
 // The next two methods deal with the following optimisation. Some
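
A note on the last concurrentMark.cpp hunk above: moving the ClearMarksInHRClosure pass in front of the end_time measurement means the recorded mark closure time now also covers clearing the marks in the collection set. A simplified sketch of the resulting ordering, using identifiers from the hunk (an illustration, not the full method body):

    double start = os::elapsedTime();

    // ... loop that iterates the collection set with cmplt until marking
    //     in the collection set is complete ...

    ClearMarksInHRClosure clr(nextMarkBitMap());
    g1h->collection_set_iterate(&clr);        // clearing now runs before timing ends

    double end_time = os::elapsedTime();
    double elapsed_time_ms = (end_time - start) * 1000.0;
    g1h->g1_policy()->record_mark_closure_time(elapsed_time_ms);  // includes the clearing pass
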
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Nov 22 04:47:10 2011 -0500
@@ -176,8 +176,7 @@
   hr->set_next_young_region(_head);
   _head = hr;
 
-  hr->set_young();
-  double yg_surv_rate = _g1h->g1_policy()->predict_yg_surv_rate((int)_length);
+  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
   ++_length;
 }
 
@@ -190,7 +189,6 @@
     _survivor_tail = hr;
   }
   _survivor_head = hr;
-
   ++_survivor_length;
 }
 
@@ -315,16 +313,20 @@
   _g1h->g1_policy()->note_start_adding_survivor_regions();
   _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
 
+  int young_index_in_cset = 0;
   for (HeapRegion* curr = _survivor_head;
        curr != NULL;
        curr = curr->get_next_young_region()) {
-    _g1h->g1_policy()->set_region_survivors(curr);
+    _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
 
     // The region is a non-empty survivor so let's add it to
     // the incremental collection set for the next evacuation
     // pause.
     _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
-  }
+    young_index_in_cset += 1;
+  }
+  assert((size_t) young_index_in_cset == _survivor_length,
+         "post-condition");
   _g1h->g1_policy()->note_stop_adding_survivor_regions();
 
   _head   = _survivor_head;
@@ -3210,8 +3212,6 @@
   }
 }
 
-// <NEW PREDICTION>
-
 double G1CollectedHeap::predict_region_elapsed_time_ms(HeapRegion *hr,
                                                        bool young) {
   return _g1_policy->predict_region_elapsed_time_ms(hr, young);
@@ -3251,7 +3251,7 @@
 void
 G1CollectedHeap::setup_surviving_young_words() {
   guarantee( _surviving_young_words == NULL, "pre-condition" );
-  size_t array_length = g1_policy()->young_cset_length();
+  size_t array_length = g1_policy()->young_cset_region_length();
   _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, array_length);
   if (_surviving_young_words == NULL) {
     vm_exit_out_of_memory(sizeof(size_t) * array_length,
@@ -3268,7 +3268,7 @@
 void
 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
   MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-  size_t array_length = g1_policy()->young_cset_length();
+  size_t array_length = g1_policy()->young_cset_region_length();
   for (size_t i = 0; i < array_length; ++i)
     _surviving_young_words[i] += surv_young_words[i];
 }
@@ -3280,8 +3280,6 @@
   _surviving_young_words = NULL;
 }
 
-// </NEW PREDICTION>
-
 #ifdef ASSERT
 class VerifyCSetClosure: public HeapRegionClosure {
 public:
@@ -4158,7 +4156,7 @@
   // non-young regions (where the age is -1)
   // We also add a few elements at the beginning and at the end in
   // an attempt to eliminate cache contention
-  size_t real_length = 1 + _g1h->g1_policy()->young_cset_length();
+  size_t real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
   size_t array_length = PADDING_ELEM_NUM +
                         real_length +
                         PADDING_ELEM_NUM;
@@ -5595,8 +5593,8 @@
 
     if (cur->is_young()) {
       int index = cur->young_index_in_cset();
-      guarantee( index != -1, "invariant" );
-      guarantee( (size_t)index < policy->young_cset_length(), "invariant" );
+      assert(index != -1, "invariant");
+      assert((size_t) index < policy->young_cset_region_length(), "invariant");
       size_t words_survived = _surviving_young_words[index];
       cur->record_surv_words_in_group(words_survived);
 
@@ -5607,7 +5605,7 @@
       cur->set_next_young_region(NULL);
     } else {
       int index = cur->young_index_in_cset();
-      guarantee( index == -1, "invariant" );
+      assert(index == -1, "invariant");
     }
 
     assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
@@ -5620,8 +5618,9 @@
       free_region(cur, &pre_used, &local_free_list, false /* par */);
     } else {
       cur->uninstall_surv_rate_group();
-      if (cur->is_young())
+      if (cur->is_young()) {
         cur->set_young_index_in_cset(-1);
+      }
       cur->set_not_young();
       cur->set_evacuation_failed(false);
       // The region is now considered to be old.
@@ -5722,7 +5721,6 @@
   assert(heap_lock_held_for_gc(),
               "the heap lock should already be held by or for this thread");
   _young_list->push_region(hr);
-  g1_policy()->set_region_short_lived(hr);
 }
 
 class NoYoungRegionsClosure: public HeapRegionClosure {
@@ -5880,7 +5878,6 @@
     HeapRegion* new_alloc_region = new_region(word_size,
                                               false /* do_expand */);
     if (new_alloc_region != NULL) {
-      g1_policy()->update_region_num(true /* next_is_young */);
       set_region_short_lived_locked(new_alloc_region);
       _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
       return new_alloc_region;
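
On the YoungList changes above: the young_index_in_cset assignment now happens in the policy. set_region_eden() tags a newly pushed eden region with the current young list length, and the survivors are re-numbered from zero when the survivor list is folded back into the young list after a pause. Assuming the young list length is reset to the survivor length at that point (that assignment is outside the hunks shown), the numbering works out as in this made-up example (illustration only, not HotSpot code):

    // Suppose a pause leaves 3 survivors and the mutators then allocate 5 eden regions.
    //
    //   survivor re-numbering:                 young_index_in_cset 0, 1, 2
    //   set_region_eden(hr, _length) x 5:      young_index_in_cset 3, 4, 5, 6, 7
    //
    // young_cset_region_length() == 3 + 5 == 8, so every young region in the next
    // collection set gets a unique slot in the _surviving_young_words accounting.
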
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Nov 22 04:47:10 2011 -0500
@@ -1610,16 +1610,12 @@
 public:
   void stop_conc_gc_threads();
 
-  // <NEW PREDICTION>
-
   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
   void check_if_region_is_too_expensive(double predicted_time_ms);
   size_t pending_card_num();
   size_t max_pending_card_num();
   size_t cards_scanned();
 
-  // </NEW PREDICTION>
-
 protected:
   size_t _max_heap_capacity;
 };
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Nov 22 04:47:10 2011 -0500
@@ -36,10 +36,6 @@
 #include "runtime/mutexLocker.hpp"
 #include "utilities/debug.hpp"
 
-#define PREDICTIONS_VERBOSE 0
-
-// <NEW PREDICTION>
-
 // Different defaults for different number of GC threads
 // They were chosen by running GCOld and SPECjbb on debris with different
 //   numbers of GC threads and choosing them based on the results
@@ -80,8 +76,6 @@
   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
 };
 
-// </NEW PREDICTION>
-
 // Help class for avoiding interleaved logging
 class LineBuffer: public StackObj {
 
@@ -137,10 +131,6 @@
   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
                         ? ParallelGCThreads : 1),
 
-  _n_pauses(0),
-  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _all_pause_times_ms(new NumberSeq()),
   _stop_world_start(0.0),
@@ -148,8 +138,6 @@
   _all_yield_times_ms(new NumberSeq()),
   _using_new_ratio_calculations(false),
 
-  _all_mod_union_times_ms(new NumberSeq()),
-
   _summary(new Summary()),
 
   _cur_clear_ct_time_ms(0.0),
@@ -165,11 +153,6 @@
   _num_cc_clears(0L),
 #endif
 
-  _region_num_young(0),
-  _region_num_tenured(0),
-  _prev_region_num_young(0),
-  _prev_region_num_tenured(0),
-
   _aux_num(10),
   _all_aux_times_ms(new NumberSeq[_aux_num]),
   _cur_aux_start_times_ms(new double[_aux_num]),
@@ -179,8 +162,6 @@
   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
-  // <NEW PREDICTION>
-
   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
   _prev_collection_pause_end_ms(0.0),
   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
@@ -199,13 +180,10 @@
                                          new TruncatedSeq(TruncatedSeqLength)),
 
   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
-  _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 
   _pause_time_target_ms((double) MaxGCPauseMillis),
 
-  // </NEW PREDICTION>
-
   _full_young_gcs(true),
   _full_young_pause_num(0),
   _partial_young_pause_num(0),
@@ -221,16 +199,10 @@
 
    _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 
-  _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
-
   _recent_avg_pause_time_ratio(0.0),
 
   _all_full_gc_times_ms(new NumberSeq()),
 
-  // G1PausesBtwnConcMark defaults to -1
-  // so the hack is to do the cast  QQQ FIXME
-  _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark),
   _initiate_conc_mark_if_possible(false),
   _during_initial_mark_pause(false),
   _should_revert_to_full_young_gcs(false),
@@ -242,22 +214,21 @@
 
   _prev_collection_pause_used_at_end_bytes(0),
 
+  _eden_cset_region_length(0),
+  _survivor_cset_region_length(0),
+  _old_cset_region_length(0),
+
   _collection_set(NULL),
-  _collection_set_size(0),
   _collection_set_bytes_used_before(0),
 
   // Incremental CSet attributes
   _inc_cset_build_state(Inactive),
   _inc_cset_head(NULL),
   _inc_cset_tail(NULL),
-  _inc_cset_size(0),
-  _inc_cset_young_index(0),
   _inc_cset_bytes_used_before(0),
   _inc_cset_max_finger(NULL),
-  _inc_cset_recorded_young_bytes(0),
   _inc_cset_recorded_rs_lengths(0),
   _inc_cset_predicted_elapsed_time_ms(0.0),
-  _inc_cset_predicted_bytes_to_copy(0),
 
 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
@@ -325,8 +296,6 @@
   // start conservatively
   _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis;
 
-  // <NEW PREDICTION>
-
   int index;
   if (ParallelGCThreads == 0)
     index = 0;
@@ -348,8 +317,6 @@
   _non_young_other_cost_per_region_ms_seq->add(
                            non_young_other_cost_per_region_ms_defaults[index]);
 
-  // </NEW PREDICTION>
-
   // Below, we might need to calculate the pause time target based on
   // the pause interval. When we do so we are going to give G1 maximum
   // flexibility and allow it to do pauses when it needs to. So, we'll
@@ -908,9 +875,6 @@
 
   record_survivor_regions(0, NULL, NULL);
 
-  _prev_region_num_young   = _region_num_young;
-  _prev_region_num_tenured = _region_num_tenured;
-
   _free_regions_at_end_of_collection = _g1->free_regions();
   // Reset survivors SurvRateGroup.
   _survivor_surv_rate_group->reset();
@@ -1168,8 +1132,10 @@
   double end_time_sec = os::elapsedTime();
   double elapsed_ms = _last_pause_time_ms;
   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
+  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
+         "otherwise, the subtraction below does not make sense");
   size_t rs_size =
-    _cur_collection_pause_used_regions_at_start - collection_set_size();
+            _cur_collection_pause_used_regions_at_start - cset_region_length();
   size_t cur_used_bytes = _g1->used();
   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
   bool last_pause_included_initial_mark = false;
@@ -1226,10 +1192,6 @@
   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
                           end_time_sec, false);
 
-  guarantee(_cur_collection_pause_used_regions_at_start >=
-            collection_set_size(),
-            "Negative RS size?");
-
   // This assert is exempted when we're doing parallel collection pauses,
   // because the fragmentation caused by the parallel GC allocation buffers
   // can lead to more memory being used during collection than was used
@@ -1253,8 +1215,6 @@
     (double)surviving_bytes/
     (double)_collection_set_bytes_used_before;
 
-  _n_pauses++;
-
   // These values are used to update the summary information that is
   // displayed when TraceGen0Time is enabled, and are output as part
   // of the PrintGCDetails output, in the non-parallel case.
@@ -1295,10 +1255,6 @@
   _all_pause_times_ms->add(elapsed_ms);
 
   if (update_stats) {
-    _recent_rs_scan_times_ms->add(scan_rs_time);
-    _recent_pause_times_ms->add(elapsed_ms);
-    _recent_rs_sizes->add(rs_size);
-
     _summary->record_total_time_ms(elapsed_ms);
     _summary->record_other_time_ms(other_time_ms);
 
@@ -1342,9 +1298,6 @@
            || surviving_bytes <= _collection_set_bytes_used_before,
            "Or else negative collection!");
 
-    _recent_CS_bytes_used_before->add(_collection_set_bytes_used_before);
-    _recent_CS_bytes_surviving->add(surviving_bytes);
-
     // this is where we update the allocation rate of the application
     double app_time_ms =
       (_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
@@ -1354,13 +1307,17 @@
       // We'll just set it to something (arbitrarily) small.
       app_time_ms = 1.0;
     }
-    size_t regions_allocated =
-      (_region_num_young - _prev_region_num_young) +
-      (_region_num_tenured - _prev_region_num_tenured);
+    // We maintain the invariant that all objects allocated by mutator
+    // threads will be allocated out of eden regions. So, we can use
+    // the eden region number allocated since the previous GC to
+    // calculate the application's allocate rate. The only exception
+    // to that is humongous objects that are allocated separately. But
+    // given that humongous object allocations do not really affect
+    // either the pause's duration nor when the next pause will take
+    // place we can safely ignore them here.
+    size_t regions_allocated = eden_cset_region_length();
     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
     _alloc_rate_ms_seq->add(alloc_rate_ms);
-    _prev_region_num_young   = _region_num_young;
-    _prev_region_num_tenured = _region_num_tenured;
 
     double interval_ms =
       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
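
The comment added in the hunk above carries the reasoning: because mutators only allocate out of eden regions, the number of eden regions allocated since the previous GC is a good proxy for the allocation rate, and humongous allocations can be ignored. A rough worked example with made-up numbers:

    // Made-up numbers, purely to illustrate the computation above:
    //   eden_cset_region_length() == 8      // eden regions allocated since the last GC
    //   app_time_ms               == 400.0  // mutator time between the two pauses
    //   alloc_rate_ms             == 8 / 400.0 == 0.02 regions per ms
    // 0.02 is then the sample added to _alloc_rate_ms_seq.
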
@@ -1398,33 +1355,6 @@
     }
   }
 
-
-  if (G1PolicyVerbose > 1) {
-    gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
-  }
-
-  if (G1PolicyVerbose > 1) {
-    gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
-                           "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
-                           "      |RS|: " SIZE_FORMAT,
-                           elapsed_ms, recent_avg_time_for_pauses_ms(),
-                           scan_rs_time, recent_avg_time_for_rs_scan_ms(),
-                           rs_size);
-
-    gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
-                           "       At end " SIZE_FORMAT "K\n"
-                           "       garbage      : " SIZE_FORMAT "K"
-                           "       of     " SIZE_FORMAT "K\n"
-                           "       survival     : %6.2f%%  (%6.2f%% avg)",
-                           _cur_collection_pause_used_at_start_bytes/K,
-                           _g1->used()/K, freed_bytes/K,
-                           _collection_set_bytes_used_before/K,
-                           survival_fraction*100.0,
-                           recent_avg_survival_fraction()*100.0);
-    gclog_or_tty->print_cr("       Recent %% gc pause time: %6.2f",
-                           recent_avg_pause_time_ratio() * 100.0);
-  }
-
   // PrintGCDetails output
   if (PrintGCDetails) {
     bool print_marking_info =
@@ -1576,8 +1506,6 @@
   _short_lived_surv_rate_group->start_adding_regions();
   // do that for any other surv rate groupsx
 
-  // <NEW PREDICTION>
-
   if (update_stats) {
     double pause_time_ms = elapsed_ms;
 
@@ -1631,21 +1559,21 @@
        _mark_closure_time_ms + termination_time);
 
     double young_other_time_ms = 0.0;
-    if (_recorded_young_regions > 0) {
+    if (young_cset_region_length() > 0) {
       young_other_time_ms =
         _recorded_young_cset_choice_time_ms +
         _recorded_young_free_cset_time_ms;
       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
-                                             (double) _recorded_young_regions);
+                                          (double) young_cset_region_length());
     }
     double non_young_other_time_ms = 0.0;
-    if (_recorded_non_young_regions > 0) {
+    if (old_cset_region_length() > 0) {
       non_young_other_time_ms =
         _recorded_non_young_cset_choice_time_ms +
         _recorded_non_young_free_cset_time_ms;
 
       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
-                                         (double) _recorded_non_young_regions);
+                                            (double) old_cset_region_length());
     }
 
     double constant_other_time_ms = all_other_time_ms -
@@ -1659,7 +1587,6 @@
     }
 
     _pending_cards_seq->add((double) _pending_cards);
-    _scanned_cards_seq->add((double) cards_scanned);
     _rs_lengths_seq->add((double) _max_rs_lengths);
 
     double expensive_region_limit_ms =
@@ -1670,49 +1597,6 @@
       expensive_region_limit_ms = (double) MaxGCPauseMillis;
     }
     _expensive_region_limit_ms = expensive_region_limit_ms;
-
-    if (PREDICTIONS_VERBOSE) {
-      gclog_or_tty->print_cr("");
-      gclog_or_tty->print_cr("PREDICTIONS %1.4lf %d "
-                    "REGIONS %d %d %d "
-                    "PENDING_CARDS %d %d "
-                    "CARDS_SCANNED %d %d "
-                    "RS_LENGTHS %d %d "
-                    "RS_UPDATE %1.6lf %1.6lf RS_SCAN %1.6lf %1.6lf "
-                    "SURVIVAL_RATIO %1.6lf %1.6lf "
-                    "OBJECT_COPY %1.6lf %1.6lf OTHER_CONSTANT %1.6lf %1.6lf "
-                    "OTHER_YOUNG %1.6lf %1.6lf "
-                    "OTHER_NON_YOUNG %1.6lf %1.6lf "
-                    "VTIME_DIFF %1.6lf TERMINATION %1.6lf "
-                    "ELAPSED %1.6lf %1.6lf ",
-                    _cur_collection_start_sec,
-                    (!_last_young_gc_full) ? 2 :
-                    (last_pause_included_initial_mark) ? 1 : 0,
-                    _recorded_region_num,
-                    _recorded_young_regions,
-                    _recorded_non_young_regions,
-                    _predicted_pending_cards, _pending_cards,
-                    _predicted_cards_scanned, cards_scanned,
-                    _predicted_rs_lengths, _max_rs_lengths,
-                    _predicted_rs_update_time_ms, update_rs_time,
-                    _predicted_rs_scan_time_ms, scan_rs_time,
-                    _predicted_survival_ratio, survival_ratio,
-                    _predicted_object_copy_time_ms, obj_copy_time,
-                    _predicted_constant_other_time_ms, constant_other_time_ms,
-                    _predicted_young_other_time_ms, young_other_time_ms,
-                    _predicted_non_young_other_time_ms,
-                    non_young_other_time_ms,
-                    _vtime_diff_ms, termination_time,
-                    _predicted_pause_time_ms, elapsed_ms);
-    }
-
-    if (G1PolicyVerbose > 0) {
-      gclog_or_tty->print_cr("Pause Time, predicted: %1.4lfms (predicted %s), actual: %1.4lfms",
-                    _predicted_pause_time_ms,
-                    (_within_target) ? "within" : "outside",
-                    elapsed_ms);
-    }
-
   }
 
   _in_marking_window = new_in_marking_window;
@@ -1723,7 +1607,6 @@
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
   adjust_concurrent_refinement(update_rs_time, update_rs_processed_buffers, update_rs_time_goal_ms);
-  // </NEW PREDICTION>
 
   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
@@ -1768,8 +1651,6 @@
   }
 }
 
-// <NEW PREDICTION>
-
 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
                                                      double update_rs_processed_buffers,
                                                      double goal_ms) {
@@ -1905,98 +1786,17 @@
 }
 
 void
-G1CollectorPolicy::start_recording_regions() {
-  _recorded_rs_lengths            = 0;
-  _recorded_young_regions         = 0;
-  _recorded_non_young_regions     = 0;
-
-#if PREDICTIONS_VERBOSE
-  _recorded_marked_bytes          = 0;
-  _recorded_young_bytes           = 0;
-  _predicted_bytes_to_copy        = 0;
-  _predicted_rs_lengths           = 0;
-  _predicted_cards_scanned        = 0;
-#endif // PREDICTIONS_VERBOSE
-}
-
-void
-G1CollectorPolicy::record_cset_region_info(HeapRegion* hr, bool young) {
-#if PREDICTIONS_VERBOSE
-  if (!young) {
-    _recorded_marked_bytes += hr->max_live_bytes();
-  }
-  _predicted_bytes_to_copy += predict_bytes_to_copy(hr);
-#endif // PREDICTIONS_VERBOSE
-
-  size_t rs_length = hr->rem_set()->occupied();
-  _recorded_rs_lengths += rs_length;
-}
-
-void
-G1CollectorPolicy::record_non_young_cset_region(HeapRegion* hr) {
-  assert(!hr->is_young(), "should not call this");
-  ++_recorded_non_young_regions;
-  record_cset_region_info(hr, false);
-}
-
-void
-G1CollectorPolicy::set_recorded_young_regions(size_t n_regions) {
-  _recorded_young_regions = n_regions;
-}
-
-void G1CollectorPolicy::set_recorded_young_bytes(size_t bytes) {
-#if PREDICTIONS_VERBOSE
-  _recorded_young_bytes = bytes;
-#endif // PREDICTIONS_VERBOSE
+G1CollectorPolicy::init_cset_region_lengths(size_t eden_cset_region_length,
+                                          size_t survivor_cset_region_length) {
+  _eden_cset_region_length     = eden_cset_region_length;
+  _survivor_cset_region_length = survivor_cset_region_length;
+  _old_cset_region_length      = 0;
 }
 
 void G1CollectorPolicy::set_recorded_rs_lengths(size_t rs_lengths) {
   _recorded_rs_lengths = rs_lengths;
 }
 
-void G1CollectorPolicy::set_predicted_bytes_to_copy(size_t bytes) {
-  _predicted_bytes_to_copy = bytes;
-}
-
-void
-G1CollectorPolicy::end_recording_regions() {
-  // The _predicted_pause_time_ms field is referenced in code
-  // not under PREDICTIONS_VERBOSE. Let's initialize it.
-  _predicted_pause_time_ms = -1.0;
-
-#if PREDICTIONS_VERBOSE
-  _predicted_pending_cards = predict_pending_cards();
-  _predicted_rs_lengths = _recorded_rs_lengths + predict_rs_length_diff();
-  if (full_young_gcs())
-    _predicted_cards_scanned += predict_young_card_num(_predicted_rs_lengths);
-  else
-    _predicted_cards_scanned +=
-      predict_non_young_card_num(_predicted_rs_lengths);
-  _recorded_region_num = _recorded_young_regions + _recorded_non_young_regions;
-
-  _predicted_rs_update_time_ms =
-    predict_rs_update_time_ms(_g1->pending_card_num());
-  _predicted_rs_scan_time_ms =
-    predict_rs_scan_time_ms(_predicted_cards_scanned);
-  _predicted_object_copy_time_ms =
-    predict_object_copy_time_ms(_predicted_bytes_to_copy);
-  _predicted_constant_other_time_ms =
-    predict_constant_other_time_ms();
-  _predicted_young_other_time_ms =
-    predict_young_other_time_ms(_recorded_young_regions);
-  _predicted_non_young_other_time_ms =
-    predict_non_young_other_time_ms(_recorded_non_young_regions);
-
-  _predicted_pause_time_ms =
-    _predicted_rs_update_time_ms +
-    _predicted_rs_scan_time_ms +
-    _predicted_object_copy_time_ms +
-    _predicted_constant_other_time_ms +
-    _predicted_young_other_time_ms +
-    _predicted_non_young_other_time_ms;
-#endif // PREDICTIONS_VERBOSE
-}
-
 void G1CollectorPolicy::check_if_region_is_too_expensive(double
                                                            predicted_time_ms) {
   // I don't think we need to do this when in young GC mode since
@@ -2013,9 +1813,6 @@
   }
 }
 
-// </NEW PREDICTION>
-
-
 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
                                                double elapsed_ms) {
   _recent_gc_times_ms->add(elapsed_ms);
@@ -2023,99 +1820,6 @@
   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
 }
 
-double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
-  if (_recent_pause_times_ms->num() == 0) {
-    return (double) MaxGCPauseMillis;
-  }
-  return _recent_pause_times_ms->avg();
-}
-
-double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
-  if (_recent_rs_scan_times_ms->num() == 0) {
-    return (double)MaxGCPauseMillis/3.0;
-  }
-  return _recent_rs_scan_times_ms->avg();
-}
-
-int G1CollectorPolicy::number_of_recent_gcs() {
-  assert(_recent_rs_scan_times_ms->num() ==
-         _recent_pause_times_ms->num(), "Sequence out of sync");
-  assert(_recent_pause_times_ms->num() ==
-         _recent_CS_bytes_used_before->num(), "Sequence out of sync");
-  assert(_recent_CS_bytes_used_before->num() ==
-         _recent_CS_bytes_surviving->num(), "Sequence out of sync");
-
-  return _recent_pause_times_ms->num();
-}
-
-double G1CollectorPolicy::recent_avg_survival_fraction() {
-  return recent_avg_survival_fraction_work(_recent_CS_bytes_surviving,
-                                           _recent_CS_bytes_used_before);
-}
-
-double G1CollectorPolicy::last_survival_fraction() {
-  return last_survival_fraction_work(_recent_CS_bytes_surviving,
-                                     _recent_CS_bytes_used_before);
-}
-
-double
-G1CollectorPolicy::recent_avg_survival_fraction_work(TruncatedSeq* surviving,
-                                                     TruncatedSeq* before) {
-  assert(surviving->num() == before->num(), "Sequence out of sync");
-  if (before->sum() > 0.0) {
-      double recent_survival_rate = surviving->sum() / before->sum();
-      // We exempt parallel collection from this check because Alloc Buffer
-      // fragmentation can produce negative collections.
-      // Further, we're now always doing parallel collection.  But I'm still
-      // leaving this here as a placeholder for a more precise assertion later.
-      // (DLD, 10/05.)
-      assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
-             _g1->evacuation_failed() ||
-             recent_survival_rate <= 1.0, "Or bad frac");
-      return recent_survival_rate;
-  } else {
-    return 1.0; // Be conservative.
-  }
-}
-
-double
-G1CollectorPolicy::last_survival_fraction_work(TruncatedSeq* surviving,
-                                               TruncatedSeq* before) {
-  assert(surviving->num() == before->num(), "Sequence out of sync");
-  if (surviving->num() > 0 && before->last() > 0.0) {
-    double last_survival_rate = surviving->last() / before->last();
-    // We exempt parallel collection from this check because Alloc Buffer
-    // fragmentation can produce negative collections.
-    // Further, we're now always doing parallel collection.  But I'm still
-    // leaving this here as a placeholder for a more precise assertion later.
-    // (DLD, 10/05.)
-    assert((true || G1CollectedHeap::use_parallel_gc_threads()) ||
-           last_survival_rate <= 1.0, "Or bad frac");
-    return last_survival_rate;
-  } else {
-    return 1.0;
-  }
-}
-
-static const int survival_min_obs = 5;
-static double survival_min_obs_limits[] = { 0.9, 0.7, 0.5, 0.3, 0.1 };
-static const double min_survival_rate = 0.1;
-
-double
-G1CollectorPolicy::conservative_avg_survival_fraction_work(double avg,
-                                                           double latest) {
-  double res = avg;
-  if (number_of_recent_gcs() < survival_min_obs) {
-    res = MAX2(res, survival_min_obs_limits[number_of_recent_gcs()]);
-  }
-  res = MAX2(res, latest);
-  res = MAX2(res, min_survival_rate);
-  // In the parallel case, LAB fragmentation can produce "negative
-  // collections"; so can evac failure.  Cap at 1.0
-  res = MIN2(res, 1.0);
-  return res;
-}
-
 size_t G1CollectorPolicy::expansion_amount() {
   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
   double threshold = _gc_overhead_perc;
@@ -2331,15 +2035,6 @@
         print_summary_sd(0, buffer, &_all_aux_times_ms[i]);
       }
     }
-
-    size_t all_region_num = _region_num_young + _region_num_tenured;
-    gclog_or_tty->print_cr("   New Regions %8d, Young %8d (%6.2lf%%), "
-               "Tenured %8d (%6.2lf%%)",
-               all_region_num,
-               _region_num_young,
-               (double) _region_num_young / (double) all_region_num * 100.0,
-               _region_num_tenured,
-               (double) _region_num_tenured / (double) all_region_num * 100.0);
   }
   if (TraceGen1Time) {
     if (_all_full_gc_times_ms->num() > 0) {
@@ -2361,14 +2056,6 @@
 #endif // PRODUCT
 }
 
-void G1CollectorPolicy::update_region_num(bool young) {
-  if (young) {
-    ++_region_num_young;
-  } else {
-    ++_region_num_tenured;
-  }
-}
-
 #ifndef PRODUCT
 // for debugging, bit of a hack...
 static char*
@@ -2682,8 +2369,7 @@
 }
 
 // Add the heap region at the head of the non-incremental collection set
-void G1CollectorPolicy::
-add_to_collection_set(HeapRegion* hr) {
+void G1CollectorPolicy::add_old_region_to_cset(HeapRegion* hr) {
   assert(_inc_cset_build_state == Active, "Precondition");
   assert(!hr->is_young(), "non-incremental add of young region");
 
@@ -2694,9 +2380,11 @@
   hr->set_in_collection_set(true);
   hr->set_next_in_collection_set(_collection_set);
   _collection_set = hr;
-  _collection_set_size++;
   _collection_set_bytes_used_before += hr->used();
   _g1->register_region_with_in_cset_fast_test(hr);
+  size_t rs_length = hr->rem_set()->occupied();
+  _recorded_rs_lengths += rs_length;
+  _old_cset_region_length += 1;
 }
 
 // Initialize the per-collection-set information
@@ -2705,16 +2393,11 @@
 
   _inc_cset_head = NULL;
   _inc_cset_tail = NULL;
-  _inc_cset_size = 0;
   _inc_cset_bytes_used_before = 0;
 
-  _inc_cset_young_index = 0;
-
   _inc_cset_max_finger = 0;
-  _inc_cset_recorded_young_bytes = 0;
   _inc_cset_recorded_rs_lengths = 0;
   _inc_cset_predicted_elapsed_time_ms = 0;
-  _inc_cset_predicted_bytes_to_copy = 0;
   _inc_cset_build_state = Active;
 }
 
@@ -2745,20 +2428,6 @@
   // rset sampling code
   hr->set_recorded_rs_length(rs_length);
   hr->set_predicted_elapsed_time_ms(region_elapsed_time_ms);
-
-#if PREDICTIONS_VERBOSE
-  size_t bytes_to_copy = predict_bytes_to_copy(hr);
-  _inc_cset_predicted_bytes_to_copy += bytes_to_copy;
-
-  // Record the number of bytes used in this region
-  _inc_cset_recorded_young_bytes += used_bytes;
-
-  // Cache the values we have added to the aggregated informtion
-  // in the heap region in case we have to remove this region from
-  // the incremental collection set, or it is updated by the
-  // rset sampling code
-  hr->set_predicted_bytes_to_copy(bytes_to_copy);
-#endif // PREDICTIONS_VERBOSE
 }
 
 void G1CollectorPolicy::remove_from_incremental_cset_info(HeapRegion* hr) {
@@ -2784,17 +2453,6 @@
   // Clear the values cached in the heap region
   hr->set_recorded_rs_length(0);
   hr->set_predicted_elapsed_time_ms(0);
-
-#if PREDICTIONS_VERBOSE
-  size_t old_predicted_bytes_to_copy = hr->predicted_bytes_to_copy();
-  _inc_cset_predicted_bytes_to_copy -= old_predicted_bytes_to_copy;
-
-  // Subtract the number of bytes used in this region
-  _inc_cset_recorded_young_bytes -= used_bytes;
-
-  // Clear the values cached in the heap region
-  hr->set_predicted_bytes_to_copy(0);
-#endif // PREDICTIONS_VERBOSE
 }
 
 void G1CollectorPolicy::update_incremental_cset_info(HeapRegion* hr, size_t new_rs_length) {
@@ -2806,8 +2464,8 @@
 }
 
 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
-  assert( hr->is_young(), "invariant");
-  assert( hr->young_index_in_cset() == -1, "invariant" );
+  assert(hr->is_young(), "invariant");
+  assert(hr->young_index_in_cset() > -1, "should have already been set");
   assert(_inc_cset_build_state == Active, "Precondition");
 
   // We need to clear and set the cached recorded/cached collection set
@@ -2827,11 +2485,7 @@
   hr->set_in_collection_set(true);
   assert( hr->next_in_collection_set() == NULL, "invariant");
 
-  _inc_cset_size++;
   _g1->register_region_with_in_cset_fast_test(hr);
-
-  hr->set_young_index_in_cset((int) _inc_cset_young_index);
-  ++_inc_cset_young_index;
 }
 
 // Add the region at the RHS of the incremental cset
@@ -2899,8 +2553,6 @@
 
   YoungList* young_list = _g1->young_list();
 
-  start_recording_regions();
-
   guarantee(target_pause_time_ms > 0.0,
             err_msg("target_pause_time_ms = %1.6lf should be positive",
                     target_pause_time_ms));
@@ -2923,7 +2575,6 @@
   if (time_remaining_ms < threshold) {
     double prev_time_remaining_ms = time_remaining_ms;
     time_remaining_ms = 0.50 * target_pause_time_ms;
-    _within_target = false;
     ergo_verbose3(ErgoCSetConstruction,
                   "adjust remaining time",
                   ergo_format_reason("remaining time lower than threshold")
@@ -2931,8 +2582,6 @@
                   ergo_format_ms("threshold")
                   ergo_format_ms("adjusted remaining time"),
                   prev_time_remaining_ms, threshold, time_remaining_ms);
-  } else {
-    _within_target = true;
   }
 
   size_t expansion_bytes = _g1->expansion_regions() * HeapRegion::GrainBytes;
@@ -2941,8 +2590,6 @@
   double young_start_time_sec = os::elapsedTime();
 
   _collection_set_bytes_used_before = 0;
-  _collection_set_size = 0;
-  _young_cset_length  = 0;
   _last_young_gc_full = full_young_gcs() ? true : false;
 
   if (_last_young_gc_full) {
@@ -2955,9 +2602,9 @@
   // pause are appended to the RHS of the young list, i.e.
   //   [Newly Young Regions ++ Survivors from last pause].
 
-  size_t survivor_region_num = young_list->survivor_length();
-  size_t eden_region_num = young_list->length() - survivor_region_num;
-  size_t old_region_num = 0;
+  size_t survivor_region_length = young_list->survivor_length();
+  size_t eden_region_length = young_list->length() - survivor_region_length;
+  init_cset_region_lengths(eden_region_length, survivor_region_length);
   hr = young_list->first_survivor_region();
   while (hr != NULL) {
     assert(hr->is_survivor(), "badly formed young list");
@@ -2971,9 +2618,7 @@
   if (_g1->mark_in_progress())
     _g1->concurrent_mark()->register_collection_set_finger(_inc_cset_max_finger);
 
-  _young_cset_length = _inc_cset_young_index;
   _collection_set = _inc_cset_head;
-  _collection_set_size = _inc_cset_size;
   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
@@ -2983,19 +2628,12 @@
                 ergo_format_region("eden")
                 ergo_format_region("survivors")
                 ergo_format_ms("predicted young region time"),
-                eden_region_num, survivor_region_num,
+                eden_region_length, survivor_region_length,
                 _inc_cset_predicted_elapsed_time_ms);
 
   // The number of recorded young regions is the incremental
   // collection set's current size
-  set_recorded_young_regions(_inc_cset_size);
   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
-  set_recorded_young_bytes(_inc_cset_recorded_young_bytes);
-#if PREDICTIONS_VERBOSE
-  set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
-#endif // PREDICTIONS_VERBOSE
-
-  assert(_inc_cset_size == young_list->length(), "Invariant");
 
   double young_end_time_sec = os::elapsedTime();
   _recorded_young_cset_choice_time_ms =
@@ -3009,9 +2647,16 @@
     NumberSeq seq;
     double avg_prediction = 100000000000000000.0; // something very large
 
-    size_t prev_collection_set_size = _collection_set_size;
     double prev_predicted_pause_time_ms = predicted_pause_time_ms;
     do {
+      // Note that add_old_region_to_cset() increments the
+      // _old_cset_region_length field and cset_region_length() returns the
+      // sum of _eden_cset_region_length, _survivor_cset_region_length, and
+      // _old_cset_region_length. So, as old regions are added to the
+      // CSet, _old_cset_region_length will be incremented and
+      // cset_region_length(), which is used below, will always reflect
+      // the total number of regions added up to this point to the CSet.
+
       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
                                                       avg_prediction);
       if (hr != NULL) {
@@ -3019,8 +2664,7 @@
         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
         time_remaining_ms -= predicted_time_ms;
         predicted_pause_time_ms += predicted_time_ms;
-        add_to_collection_set(hr);
-        record_non_young_cset_region(hr);
+        add_old_region_to_cset(hr);
         seq.add(predicted_time_ms);
         avg_prediction = seq.avg() + seq.sd();
       }
@@ -3041,13 +2685,13 @@
             should_continue = false;
           }
         } else {
-          if (_collection_set_size >= _young_list_fixed_length) {
+          if (cset_region_length() >= _young_list_fixed_length) {
             ergo_verbose2(ErgoCSetConstruction,
                           "stop adding old regions to CSet",
                           ergo_format_reason("CSet length reached target")
                           ergo_format_region("CSet")
                           ergo_format_region("young target"),
-                          _collection_set_size, _young_list_fixed_length);
+                          cset_region_length(), _young_list_fixed_length);
             should_continue = false;
           }
         }
@@ -3055,23 +2699,21 @@
     } while (should_continue);
 
     if (!adaptive_young_list_length() &&
-        _collection_set_size < _young_list_fixed_length) {
+                             cset_region_length() < _young_list_fixed_length) {
       ergo_verbose2(ErgoCSetConstruction,
                     "request partially-young GCs end",
                     ergo_format_reason("CSet length lower than target")
                     ergo_format_region("CSet")
                     ergo_format_region("young target"),
-                    _collection_set_size, _young_list_fixed_length);
+                    cset_region_length(), _young_list_fixed_length);
       _should_revert_to_full_young_gcs  = true;
     }
 
-    old_region_num = _collection_set_size - prev_collection_set_size;
-
     ergo_verbose2(ErgoCSetConstruction | ErgoHigh,
                   "add old regions to CSet",
                   ergo_format_region("old")
                   ergo_format_ms("predicted old region time"),
-                  old_region_num,
+                  old_cset_region_length(),
                   predicted_pause_time_ms - prev_predicted_pause_time_ms);
   }
 
@@ -3079,8 +2721,6 @@
 
   count_CS_bytes_used();
 
-  end_recording_regions();
-
   ergo_verbose5(ErgoCSetConstruction,
                 "finish choosing CSet",
                 ergo_format_region("eden")
@@ -3088,7 +2728,8 @@
                 ergo_format_region("old")
                 ergo_format_ms("predicted pause time")
                 ergo_format_ms("target pause time"),
-                eden_region_num, survivor_region_num, old_region_num,
+                eden_region_length, survivor_region_length,
+                old_cset_region_length(),
                 predicted_pause_time_ms, target_pause_time_ms);
 
   double non_young_end_time_sec = os::elapsedTime();
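
Taken together, the g1CollectorPolicy.cpp hunks replace the old _collection_set_size / _young_cset_length / _inc_cset_size counters with three explicit lengths: eden and survivor counts fixed by init_cset_region_lengths() when CSet construction starts, plus an old-region count bumped by add_old_region_to_cset(). A minimal standalone mock of that bookkeeping, as a hypothetical illustration only (it mirrors the arithmetic, not the real policy class):

    #include <cassert>
    #include <cstddef>

    // Hypothetical mock of the new CSet length accounting (illustration only).
    struct CSetLengths {
      size_t eden, survivor, old_regions;

      void init_cset_region_lengths(size_t e, size_t s) {
        eden = e; survivor = s; old_regions = 0;
      }
      void add_old_region_to_cset() { ++old_regions; }

      size_t eden_cset_region_length() const     { return eden; }
      size_t survivor_cset_region_length() const { return survivor; }
      size_t old_cset_region_length() const      { return old_regions; }
      size_t young_cset_region_length() const    { return eden + survivor; }
      size_t cset_region_length() const {
        return young_cset_region_length() + old_cset_region_length();
      }
    };

    int main() {
      CSetLengths cs;
      cs.init_cset_region_lengths(12, 3);  // 12 eden + 3 survivor regions in the young list
      cs.add_old_region_to_cset();         // two old regions picked by the CSet chooser
      cs.add_old_region_to_cset();
      assert(cs.young_cset_region_length() == 15);
      assert(cs.cset_region_length()       == 17);
      return 0;
    }

cset_region_length() is what the pause-end accounting (the rs_size subtraction) and the "CSet length reached target" check now read where _collection_set_size used to be used.
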
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Nov 22 04:47:10 2011 -0500
@@ -85,9 +85,6 @@
 
 class G1CollectorPolicy: public CollectorPolicy {
 private:
-  // The number of pauses during the execution.
-  long _n_pauses;
-
   // either equal to the number of parallel threads, if ParallelGCThreads
   // has been set, or 1 otherwise
   int _parallel_gc_threads;
@@ -127,18 +124,9 @@
   jlong  _num_cc_clears;                // number of times the card count cache has been cleared
 #endif
 
-  // Statistics for recent GC pauses.  See below for how indexed.
-  TruncatedSeq* _recent_rs_scan_times_ms;
-
   // These exclude marking times.
-  TruncatedSeq* _recent_pause_times_ms;
   TruncatedSeq* _recent_gc_times_ms;
 
-  TruncatedSeq* _recent_CS_bytes_used_before;
-  TruncatedSeq* _recent_CS_bytes_surviving;
-
-  TruncatedSeq* _recent_rs_sizes;
-
   TruncatedSeq* _concurrent_mark_remark_times_ms;
   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
 
@@ -150,13 +138,6 @@
   NumberSeq* _all_stop_world_times_ms;
   NumberSeq* _all_yield_times_ms;
 
-  size_t     _region_num_young;
-  size_t     _region_num_tenured;
-  size_t     _prev_region_num_young;
-  size_t     _prev_region_num_tenured;
-
-  NumberSeq* _all_mod_union_times_ms;
-
   int        _aux_num;
   NumberSeq* _all_aux_times_ms;
   double*    _cur_aux_start_times_ms;
@@ -194,7 +175,6 @@
   // locker is active. This should be >= _young_list_target_length;
   size_t _young_list_max_length;
 
-  size_t _young_cset_length;
   bool   _last_young_gc_full;
 
   unsigned              _full_young_pause_num;
@@ -217,8 +197,6 @@
     return _during_marking;
   }
 
-  // <NEW PREDICTION>
-
 private:
   enum PredictionConstants {
     TruncatedSeqLength = 10
@@ -240,47 +218,32 @@
   TruncatedSeq* _non_young_other_cost_per_region_ms_seq;
 
   TruncatedSeq* _pending_cards_seq;
-  TruncatedSeq* _scanned_cards_seq;
   TruncatedSeq* _rs_lengths_seq;
 
   TruncatedSeq* _cost_per_byte_ms_during_cm_seq;
 
   TruncatedSeq* _young_gc_eff_seq;
 
-  TruncatedSeq* _max_conc_overhead_seq;
-
   bool   _using_new_ratio_calculations;
   size_t _min_desired_young_length; // as set on the command line or default calculations
   size_t _max_desired_young_length; // as set on the command line or default calculations
 
-  size_t _recorded_young_regions;
-  size_t _recorded_non_young_regions;
-  size_t _recorded_region_num;
+  size_t _eden_cset_region_length;
+  size_t _survivor_cset_region_length;
+  size_t _old_cset_region_length;
+
+  void init_cset_region_lengths(size_t eden_cset_region_length,
+                                size_t survivor_cset_region_length);
+
+  size_t eden_cset_region_length()     { return _eden_cset_region_length;     }
+  size_t survivor_cset_region_length() { return _survivor_cset_region_length; }
+  size_t old_cset_region_length()      { return _old_cset_region_length;      }
 
   size_t _free_regions_at_end_of_collection;
 
   size_t _recorded_rs_lengths;
   size_t _max_rs_lengths;
 
-  size_t _recorded_marked_bytes;
-  size_t _recorded_young_bytes;
-
-  size_t _predicted_pending_cards;
-  size_t _predicted_cards_scanned;
-  size_t _predicted_rs_lengths;
-  size_t _predicted_bytes_to_copy;
-
-  double _predicted_survival_ratio;
-  double _predicted_rs_update_time_ms;
-  double _predicted_rs_scan_time_ms;
-  double _predicted_object_copy_time_ms;
-  double _predicted_constant_other_time_ms;
-  double _predicted_young_other_time_ms;
-  double _predicted_non_young_other_time_ms;
-  double _predicted_pause_time_ms;
-
-  double _vtime_diff_ms;
-
   double _recorded_young_free_cset_time_ms;
   double _recorded_non_young_free_cset_time_ms;
 
@@ -320,18 +283,21 @@
   double _pause_time_target_ms;
   double _recorded_young_cset_choice_time_ms;
   double _recorded_non_young_cset_choice_time_ms;
-  bool   _within_target;
   size_t _pending_cards;
   size_t _max_pending_cards;
 
 public:
 
-  void set_region_short_lived(HeapRegion* hr) {
+  void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
+    hr->set_young();
     hr->install_surv_rate_group(_short_lived_surv_rate_group);
+    hr->set_young_index_in_cset(young_index_in_cset);
   }
 
-  void set_region_survivors(HeapRegion* hr) {
+  void set_region_survivor(HeapRegion* hr, int young_index_in_cset) {
+    assert(hr->is_young() && hr->is_survivor(), "pre-condition");
     hr->install_surv_rate_group(_survivor_surv_rate_group);
+    hr->set_young_index_in_cset(young_index_in_cset);
   }
 
 #ifndef PRODUCT
@@ -343,10 +309,6 @@
                 seq->davg() * confidence_factor(seq->num()));
   }
 
-  size_t young_cset_length() {
-    return _young_cset_length;
-  }
-
   void record_max_rs_lengths(size_t rs_lengths) {
     _max_rs_lengths = rs_lengths;
   }
@@ -465,20 +427,12 @@
   size_t predict_bytes_to_copy(HeapRegion* hr);
   double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
 
-  void start_recording_regions();
-  void record_cset_region_info(HeapRegion* hr, bool young);
-  void record_non_young_cset_region(HeapRegion* hr);
+  void set_recorded_rs_lengths(size_t rs_lengths);
 
-  void set_recorded_young_regions(size_t n_regions);
-  void set_recorded_young_bytes(size_t bytes);
-  void set_recorded_rs_lengths(size_t rs_lengths);
-  void set_predicted_bytes_to_copy(size_t bytes);
-
-  void end_recording_regions();
-
-  void record_vtime_diff_ms(double vtime_diff_ms) {
-    _vtime_diff_ms = vtime_diff_ms;
-  }
+  size_t cset_region_length()       { return young_cset_region_length() +
+                                             old_cset_region_length(); }
+  size_t young_cset_region_length() { return eden_cset_region_length() +
+                                             survivor_cset_region_length(); }
 
   void record_young_free_cset_time_ms(double time_ms) {
     _recorded_young_free_cset_time_ms = time_ms;
@@ -494,8 +448,6 @@
 
   double predict_survivor_regions_evac_time();
 
-  // </NEW PREDICTION>
-
   void cset_regions_freed() {
     bool propagate = _last_young_gc_full && !_in_marking_window;
     _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
@@ -576,7 +528,6 @@
   double max_sum (double* data1, double* data2);
 
   int _last_satb_drain_processed_buffers;
-  int _last_update_rs_processed_buffers;
   double _last_pause_time_ms;
 
   size_t _bytes_in_collection_set_before_gc;
@@ -596,10 +547,6 @@
   // set at the start of the pause.
   HeapRegion* _collection_set;
 
-  // The number of regions in the collection set. Set from the incrementally
-  // built collection set at the start of an evacuation pause.
-  size_t _collection_set_size;
-
   // The number of bytes in the collection set before the pause. Set from
   // the incrementally built collection set at the start of an evacuation
   // pause.
@@ -622,16 +569,6 @@
   // The tail of the incrementally built collection set.
   HeapRegion* _inc_cset_tail;
 
-  // The number of regions in the incrementally built collection set.
-  // Used to set _collection_set_size at the start of an evacuation
-  // pause.
-  size_t _inc_cset_size;
-
-  // Used as the index in the surving young words structure
-  // which tracks the amount of space, for each young region,
-  // that survives the pause.
-  size_t _inc_cset_young_index;
-
   // The number of bytes in the incrementally built collection set.
   // Used to set _collection_set_bytes_used_before at the start of
   // an evacuation pause.
@@ -640,11 +577,6 @@
   // Used to record the highest end of heap region in collection set
   HeapWord* _inc_cset_max_finger;
 
-  // The number of recorded used bytes in the young regions
-  // of the collection set. This is the sum of the used() bytes
-  // of retired young regions in the collection set.
-  size_t _inc_cset_recorded_young_bytes;
-
   // The RSet lengths recorded for regions in the collection set
   // (updated by the periodic sampling of the regions in the
   // young list/collection set).
@@ -655,68 +587,9 @@
   // regions in the young list/collection set).
   double _inc_cset_predicted_elapsed_time_ms;
 
-  // The predicted bytes to copy for the regions in the collection
-  // set (updated by the periodic sampling of the regions in the
-  // young list/collection set).
-  size_t _inc_cset_predicted_bytes_to_copy;
-
   // Stash a pointer to the g1 heap.
   G1CollectedHeap* _g1;
 
-  // The average time in ms per collection pause, averaged over recent pauses.
-  double recent_avg_time_for_pauses_ms();
-
-  // The average time in ms for RS scanning, per pause, averaged
-  // over recent pauses. (Note the RS scanning time for a pause
-  // is itself an average of the RS scanning time for each worker
-  // thread.)
-  double recent_avg_time_for_rs_scan_ms();
-
-  // The number of "recent" GCs recorded in the number sequences
-  int number_of_recent_gcs();
-
-  // The average survival ratio, computed by the total number of bytes
-  // suriviving / total number of bytes before collection over the last
-  // several recent pauses.
-  double recent_avg_survival_fraction();
-  // The survival fraction of the most recent pause; if there have been no
-  // pauses, returns 1.0.
-  double last_survival_fraction();
-
-  // Returns a "conservative" estimate of the recent survival rate, i.e.,
-  // one that may be higher than "recent_avg_survival_fraction".
-  // This is conservative in several ways:
-  //   If there have been few pauses, it will assume a potential high
-  //     variance, and err on the side of caution.
-  //   It puts a lower bound (currently 0.1) on the value it will return.
-  //   To try to detect phase changes, if the most recent pause ("latest") has a
-  //     higher-than average ("avg") survival rate, it returns that rate.
-  // "work" version is a utility function; young is restricted to young regions.
-  double conservative_avg_survival_fraction_work(double avg,
-                                                 double latest);
-
-  // The arguments are the two sequences that keep track of the number of bytes
-  //   surviving and the total number of bytes before collection, resp.,
-  //   over the last evereal recent pauses
-  // Returns the survival rate for the category in the most recent pause.
-  // If there have been no pauses, returns 1.0.
-  double last_survival_fraction_work(TruncatedSeq* surviving,
-                                     TruncatedSeq* before);
-
-  // The arguments are the two sequences that keep track of the number of bytes
-  //   surviving and the total number of bytes before collection, resp.,
-  //   over the last several recent pauses
-  // Returns the average survival ration over the last several recent pauses
-  // If there have been no pauses, return 1.0
-  double recent_avg_survival_fraction_work(TruncatedSeq* surviving,
-                                           TruncatedSeq* before);
-
-  double conservative_avg_survival_fraction() {
-    double avg = recent_avg_survival_fraction();
-    double latest = last_survival_fraction();
-    return conservative_avg_survival_fraction_work(avg, latest);
-  }
-
   // The ratio of gc time to elapsed time, computed over recent pauses.
   double _recent_avg_pause_time_ratio;
 
@@ -724,9 +597,6 @@
     return _recent_avg_pause_time_ratio;
   }
 
-  // Number of pauses between concurrent marking.
-  size_t _pauses_btwn_concurrent_mark;
-
   // At the end of a pause we check the heap occupancy and we decide
   // whether we will start a marking cycle during the next pause. If
   // we decide that we want to do that, we will set this parameter to
@@ -849,9 +719,6 @@
 
   GenRemSet::Name  rem_set_name()     { return GenRemSet::CardTable; }
 
-  // The number of collection pauses so far.
-  long n_pauses() const { return _n_pauses; }
-
   // Update the heuristic info to record a collection pause of the given
   // start time, where the given number of bytes were used at the start.
   // This may involve changing the desired size of a collection set.
@@ -905,10 +772,6 @@
     _last_satb_drain_processed_buffers = processed_buffers;
   }
 
-  void record_mod_union_time(double ms) {
-    _all_mod_union_times_ms->add(ms);
-  }
-
   void record_update_rs_time(int thread, double ms) {
     _par_last_update_rs_times_ms[thread] = ms;
   }
@@ -1009,11 +872,8 @@
 
   void clear_collection_set() { _collection_set = NULL; }
 
-  // The number of elements in the current collection set.
-  size_t collection_set_size() { return _collection_set_size; }
-
-  // Add "hr" to the CS.
-  void add_to_collection_set(HeapRegion* hr);
+  // Add old region "hr" to the CSet.
+  void add_old_region_to_cset(HeapRegion* hr);
 
   // Incremental CSet Support
 
@@ -1023,9 +883,6 @@
   // The tail of the incrementally built collection set.
   HeapRegion* inc_set_tail() { return _inc_cset_tail; }
 
-  // The number of elements in the incrementally built collection set.
-  size_t inc_cset_size() { return _inc_cset_size; }
-
   // Initialize incremental collection set info.
   void start_incremental_cset_building();
 
@@ -1125,8 +982,6 @@
     return _young_list_max_length;
   }
 
-  void update_region_num(bool young);
-
   bool full_young_gcs() {
     return _full_young_gcs;
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Nov 22 04:47:10 2011 -0500
@@ -219,7 +219,7 @@
 HeapRegion* G1RemSet::calculateStartRegion(int worker_i) {
   HeapRegion* result = _g1p->collection_set();
   if (ParallelGCThreads > 0) {
-    size_t cs_size = _g1p->collection_set_size();
+    size_t cs_size = _g1p->cset_region_length();
     int n_workers = _g1->workers()->total_workers();
     size_t cs_spans = cs_size / n_workers;
     size_t ind      = cs_spans * worker_i;
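
calculateStartRegion() above spreads collection-set scanning across the workers by dividing the region count (now taken from cset_region_length()) into equal spans. A minimal standalone sketch of that arithmetic, assuming hypothetical stand-in values rather than the real policy and work-gang objects:

#include <cstdio>

// Hypothetical stand-ins for _g1p->cset_region_length() and
// _g1->workers()->total_workers(); not the real HotSpot objects.
static const size_t kCSetRegionLength = 10;
static const int    kTotalWorkers     = 4;

// Each worker starts walking the collection set at its own offset so the
// regions are split into roughly equal spans; worker 0 starts at index 0.
static size_t start_index_for_worker(int worker_i) {
  size_t cs_spans = kCSetRegionLength / kTotalWorkers;  // regions per span
  return cs_spans * worker_i;
}

int main() {
  for (int w = 0; w < kTotalWorkers; w++) {
    printf("worker %d starts at collection-set index %zu\n",
           w, start_index_for_worker(w));
  }
  return 0;
}
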
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Tue Nov 22 04:47:10 2011 -0500
@@ -39,10 +39,6 @@
   develop(intx, G1MarkingOverheadPercent, 0,                                \
           "Overhead of concurrent marking")                                 \
                                                                             \
-                                                                            \
-  develop(intx, G1PolicyVerbose, 0,                                         \
-          "The verbosity level on G1 policy decisions")                     \
-                                                                            \
   develop(intx, G1MarkingVerboseLevel, 0,                                   \
           "Level (0-4) of verboseness of the marking code")                 \
                                                                             \
@@ -58,9 +54,6 @@
   develop(bool, G1TraceMarkStackOverflow, false,                            \
           "If true, extra debugging code for CM restart for ovflw.")        \
                                                                             \
-  develop(intx, G1PausesBtwnConcMark, -1,                                   \
-          "If positive, fixed number of pauses between conc markings")      \
-                                                                            \
   diagnostic(bool, G1SummarizeConcMark, false,                              \
           "Summarize concurrent mark info")                                 \
                                                                             \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Nov 22 04:47:10 2011 -0500
@@ -416,7 +416,7 @@
 
   void add_to_marked_bytes(size_t incr_bytes) {
     _next_marked_bytes = _next_marked_bytes + incr_bytes;
-    guarantee( _next_marked_bytes <= used(), "invariant" );
+    assert(_next_marked_bytes <= used(), "invariant" );
   }
 
   void zero_marked_bytes()      {
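
The check in add_to_marked_bytes() is downgraded here from guarantee() to assert(): in HotSpot, guarantee() stays active in product builds while assert() is compiled out of them, so the invariant is now verified only in debug builds. A rough standalone illustration of that distinction, using standard <cassert> and a hand-rolled always-on check rather than HotSpot's debug.hpp macros:

#include <cassert>
#include <cstdio>
#include <cstdlib>

// Always-on check, in the spirit of HotSpot's guarantee(): still present
// when NDEBUG is defined.
static void check_always(bool cond, const char* msg) {
  if (!cond) {
    fprintf(stderr, "guarantee-style failure: %s\n", msg);
    abort();
  }
}

int main() {
  size_t next_marked_bytes = 64;
  size_t used_bytes        = 128;

  // Debug-only check: with -DNDEBUG this line compiles to nothing.
  assert(next_marked_bytes <= used_bytes && "invariant");

  // Always-on check: survives -DNDEBUG, like guarantee() in product builds.
  check_always(next_marked_bytes <= used_bytes, "invariant");

  puts("invariant holds");
  return 0;
}
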
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweepDecorator.cpp	Tue Nov 22 04:47:10 2011 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -96,7 +96,8 @@
    * by the MarkSweepAlwaysCompactCount parameter. This is a significant
    * performance improvement!
    */
-  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
+  bool skip_dead = (MarkSweepAlwaysCompactCount < 1)
+    || ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);
 
   size_t allowed_deadspace = 0;
   if (skip_dead) {
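
The new left-hand term matters because MarkSweepAlwaysCompactCount is a user-settable flag: without the short-circuit, setting it to 0 would make the modulo divide by zero. With the guard, a value below 1 simply means dead space may always be skipped, and the % is never evaluated. A small sketch of the same guard pattern, with hypothetical stand-in variables for the flag and the invocation counter:

#include <cstdio>

int main(int argc, char**) {
  // Hypothetical stand-ins for MarkSweepAlwaysCompactCount and
  // PSMarkSweep::total_invocations(); running with no arguments models a
  // user setting the flag to 0.
  long mark_sweep_always_compact_count = argc - 1;
  long total_invocations               = 42;

  // || short-circuits left to right, so when the count is < 1 the
  // right-hand side never runs and there is no modulo-by-zero.
  bool skip_dead = (mark_sweep_always_compact_count < 1)
      || ((total_invocations % mark_sweep_always_compact_count) != 0);

  printf("skip_dead = %s\n", skip_dead ? "true" : "false");
  return 0;
}
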
--- a/hotspot/src/share/vm/memory/space.hpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/memory/space.hpp	Tue Nov 22 04:47:10 2011 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -533,7 +533,8 @@
    * by the MarkSweepAlwaysCompactCount parameter.                           \
    */                                                                        \
   int invocations = SharedHeap::heap()->perm_gen()->stat_record()->invocations;\
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
+  bool skip_dead = (MarkSweepAlwaysCompactCount < 1)                         \
+    ||((invocations % MarkSweepAlwaysCompactCount) != 0);                    \
                                                                              \
   size_t allowed_deadspace = 0;                                              \
   if (skip_dead) {                                                           \
--- a/hotspot/src/share/vm/services/memoryManager.cpp	Mon Nov 21 10:22:04 2011 -0500
+++ b/hotspot/src/share/vm/services/memoryManager.cpp	Tue Nov 22 04:47:10 2011 -0500
@@ -168,10 +168,8 @@
   // initialize the arrays for memory usage
   _before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
   _after_gc_usage_array  = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
-  size_t len = num_pools * sizeof(MemoryUsage);
-  memset(_before_gc_usage_array, 0, len);
-  memset(_after_gc_usage_array, 0, len);
   _usage_array_size = num_pools;
+  clear();
 }
 
 GCStatInfo::~GCStatInfo() {
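
The constructor change just above replaces the two memsets with a call to clear(), so one routine owns the knowledge of how large the usage arrays are and how to reset them. A minimal sketch of that "construct by delegating to clear()" pattern, using a hypothetical class rather than the real GCStatInfo:

#include <cstring>

class UsageRecord {            // hypothetical stand-in for GCStatInfo
 public:
  explicit UsageRecord(size_t num_pools)
      : _before(new long[num_pools]),
        _after(new long[num_pools]),
        _size(num_pools) {
    clear();                   // one code path for "everything is zero"
  }
  ~UsageRecord() { delete[] _before; delete[] _after; }

  // The single place that knows how to reset the arrays; the constructor
  // and any later "start over" caller both go through here.
  void clear() {
    memset(_before, 0, _size * sizeof(long));
    memset(_after,  0, _size * sizeof(long));
  }

 private:
  long*  _before;
  long*  _after;
  size_t _size;
};

int main() {
  UsageRecord rec(4);
  return 0;
}
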
@@ -304,12 +302,8 @@
       pool->set_last_collection_usage(usage);
       LowMemoryDetector::detect_after_gc_memory(pool);
     }
-    if(is_notification_enabled()) {
-      bool isMajorGC = this == MemoryService::get_major_gc_manager();
-      GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC",
-                                   GCCause::to_string(cause));
-    }
   }
+
   if (countCollection) {
     _num_collections++;
     // alternately update two objects making one public when complete
@@ -321,6 +315,12 @@
       // reset the current stat for diagnosability purposes
       _current_gc_stat->clear();
     }
+
+    if (is_notification_enabled()) {
+      bool isMajorGC = this == MemoryService::get_major_gc_manager();
+      GCNotifier::pushNotification(this, isMajorGC ? "end of major GC" : "end of minor GC",
+                                   GCCause::to_string(cause));
+    }
   }
 }
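
In the second hunk the GC-end notification moves from the pool-usage loop to the end of the countCollection branch, after the public GCStatInfo has been swapped in, which suggests the intent is that listeners triggered by the notification read fully updated statistics. A rough standalone sketch of that ordering, with hypothetical types in place of MemoryManager, GCNotifier and GCStatInfo:

#include <cstdio>

struct GcStat {                        // hypothetical stand-in for GCStatInfo
  long collections   = 0;
  long last_pause_us = 0;
};

static GcStat g_public_stat;           // what listeners are allowed to read
static bool   g_notifications_enabled = true;

// Hypothetical listener hook: only ever reads the already-published stats.
static void push_notification(const char* what) {
  printf("%s: collections=%ld last_pause=%ldus\n",
         what, g_public_stat.collections, g_public_stat.last_pause_us);
}

static void end_of_gc(long pause_us) {
  // 1. Publish the completed statistics first...
  g_public_stat.collections += 1;
  g_public_stat.last_pause_us = pause_us;

  // 2. ...and only then tell listeners, so they never observe a stale or
  //    half-updated record (the ordering the patch establishes).
  if (g_notifications_enabled) {
    push_notification("end of GC");
  }
}

int main() {
  end_of_gc(1234);
  return 0;
}
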