Merge
author ysr
Tue, 16 Aug 2011 08:02:29 -0700
changeset 10244 dae1b1e51fca
parent 10235 cb2283e51174 (current diff)
parent 10243 d00a21009f1f (diff)
child 10249 2c43be5c9bca
child 10272 9377a9510c83
Merge
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -125,6 +125,10 @@
 # include <inttypes.h>
 # include <sys/ioctl.h>
 
+#ifdef AMD64
+#include <asm/vsyscall.h>
+#endif
+
 #define MAX_PATH    (2 * K)
 
 // for timer info max values which include all bits
@@ -2534,7 +2538,7 @@
 }
 
 void os::free_memory(char *addr, size_t bytes) {
-  ::madvise(addr, bytes, MADV_DONTNEED);
+  commit_memory(addr, bytes, false);
 }
 
 void os::numa_make_global(char *addr, size_t bytes) {
@@ -2578,6 +2582,22 @@
   return end;
 }
 
+
+int os::Linux::sched_getcpu_syscall(void) {
+  unsigned int cpu;
+  int retval = -1;
+
+#if defined(IA32)
+  retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
+#elif defined(AMD64)
+  typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
+  vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
+  retval = vgetcpu(&cpu, NULL, NULL);
+#endif
+
+  return (retval == -1) ? retval : cpu;
+}
+
 // Something to do with the numa-aware allocator needs these symbols
 extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
 extern "C" JNIEXPORT void numa_error(char *where) { }
@@ -2601,6 +2621,10 @@
   set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
                                   dlsym(RTLD_DEFAULT, "sched_getcpu")));
 
+  // If it's not available, try a direct syscall.
+  if (sched_getcpu() == -1)
+    set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
+
   if (sched_getcpu() != -1) { // Does it work?
     void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
     if (handle != NULL) {
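
The os_linux.cpp change above gives the VM a fallback when glibc is too old to export sched_getcpu(): look the symbol up via dlsym first and, if the result is missing or non-functional, install a wrapper around the raw getcpu syscall. A minimal standalone sketch of the same cascade, assuming a Linux host with SYS_getcpu in <sys/syscall.h> (link with -ldl); the AMD64 vsyscall path from the hunk is deliberately omitted:

#define _GNU_SOURCE 1   // for RTLD_DEFAULT on older glibc
#include <dlfcn.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <cstdio>

typedef int (*sched_getcpu_func_t)(void);

// Fallback: ask the kernel directly. The third (tcache) argument is
// ignored by modern kernels, so NULL is fine.
static int sched_getcpu_via_syscall(void) {
  unsigned int cpu = 0;
  int retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
  return (retval == -1) ? retval : (int)cpu;
}

int main() {
  // Prefer the glibc implementation if this libc exports it.
  sched_getcpu_func_t getcpu =
      (sched_getcpu_func_t)dlsym(RTLD_DEFAULT, "sched_getcpu");
  if (getcpu == NULL || getcpu() == -1) {
    getcpu = &sched_getcpu_via_syscall;   // glibc too old: use the syscall
  }
  std::printf("running on cpu %d\n", getcpu());
  return 0;
}
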
--- a/hotspot/src/os/linux/vm/os_linux.hpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/os/linux/vm/os_linux.hpp	Tue Aug 16 08:02:29 2011 -0700
@@ -263,6 +263,7 @@
   static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
+  static int sched_getcpu_syscall(void);
 public:
   static int sched_getcpu()  { return _sched_getcpu != NULL ? _sched_getcpu() : -1; }
   static int numa_node_to_cpus(int node, unsigned long *buffer, int bufferlen) {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -2025,9 +2025,6 @@
                                             _intra_sweep_estimate.padded_average());
   }
 
-  {
-    TraceCMSMemoryManagerStats tmms(gch->gc_cause());
-  }
   GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
     ref_processor(), clear_all_soft_refs);
   #ifdef ASSERT
@@ -9345,15 +9342,3 @@
   }
 }
 
-// when bailing out of cms in concurrent mode failure
-TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(GCCause::Cause cause): TraceMemoryManagerStats() {
-  initialize(true /* fullGC */ ,
-             cause /* cause of the GC */,
-             true /* recordGCBeginTime */,
-             true /* recordPreGCUsage */,
-             true /* recordPeakUsage */,
-             true /* recordPostGCusage */,
-             true /* recordAccumulatedGCTime */,
-             true /* recordGCEndTime */,
-             true /* countCollection */ );
-}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Aug 16 08:02:29 2011 -0700
@@ -1900,7 +1900,6 @@
 
  public:
   TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
-  TraceCMSMemoryManagerStats(GCCause::Cause cause);
 };
 
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -129,6 +129,7 @@
     // region in _alloc_region. This is the reason why an active region
     // can never be empty.
     _alloc_region = new_alloc_region;
+    _count += 1;
     trace("region allocation successful");
     return result;
   } else {
@@ -139,8 +140,8 @@
 }
 
 void G1AllocRegion::fill_in_ext_msg(ar_ext_msg* msg, const char* message) {
-  msg->append("[%s] %s b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
-              _name, message, BOOL_TO_STR(_bot_updates),
+  msg->append("[%s] %s c: "SIZE_FORMAT" b: %s r: "PTR_FORMAT" u: "SIZE_FORMAT,
+              _name, message, _count, BOOL_TO_STR(_bot_updates),
               _alloc_region, _used_bytes_before);
 }
 
@@ -148,16 +149,34 @@
   trace("initializing");
   assert(_alloc_region == NULL && _used_bytes_before == 0,
          ar_ext_msg(this, "pre-condition"));
-  assert(_dummy_region != NULL, "should have been set");
+  assert(_dummy_region != NULL, ar_ext_msg(this, "should have been set"));
   _alloc_region = _dummy_region;
+  _count = 0;
   trace("initialized");
 }
 
+void G1AllocRegion::set(HeapRegion* alloc_region) {
+  trace("setting");
+  // We explicitly check that the region is not empty to make sure we
+  // maintain the "the alloc region cannot be empty" invariant.
+  assert(alloc_region != NULL && !alloc_region->is_empty(),
+         ar_ext_msg(this, "pre-condition"));
+  assert(_alloc_region == _dummy_region &&
+         _used_bytes_before == 0 && _count == 0,
+         ar_ext_msg(this, "pre-condition"));
+
+  _used_bytes_before = alloc_region->used();
+  _alloc_region = alloc_region;
+  _count += 1;
+  trace("set");
+}
+
 HeapRegion* G1AllocRegion::release() {
   trace("releasing");
   HeapRegion* alloc_region = _alloc_region;
   retire(false /* fill_up */);
-  assert(_alloc_region == _dummy_region, "post-condition of retire()");
+  assert(_alloc_region == _dummy_region,
+         ar_ext_msg(this, "post-condition of retire()"));
   _alloc_region = NULL;
   trace("released");
   return (alloc_region == _dummy_region) ? NULL : alloc_region;
@@ -196,7 +215,8 @@
       jio_snprintf(rest_buffer, buffer_length, "");
     }
 
-    tty->print_cr("[%s] %s : %s %s", _name, hr_buffer, str, rest_buffer);
+    tty->print_cr("[%s] "SIZE_FORMAT" %s : %s %s",
+                  _name, _count, hr_buffer, str, rest_buffer);
   }
 }
 #endif // G1_ALLOC_REGION_TRACING
@@ -204,5 +224,5 @@
 G1AllocRegion::G1AllocRegion(const char* name,
                              bool bot_updates)
   : _name(name), _bot_updates(bot_updates),
-    _alloc_region(NULL), _used_bytes_before(0) { }
+    _alloc_region(NULL), _count(0), _used_bytes_before(0) { }
 
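To make the init()/set()/release() protocol above easier to follow, here is a deliberately simplified, self-contained model of the same state machine, including the _count bookkeeping. It uses stand-in types and is an illustration only, not the HotSpot class; retiring and filling of the outgoing region are omitted.

#include <cassert>
#include <cstddef>

struct Region {
  size_t used_bytes;
  bool is_empty() const { return used_bytes == 0; }
};

class AllocRegionModel {
  Region  _dummy;                // non-NULL sentinel while active
  Region* _alloc_region;
  size_t  _count;                // distinct regions used since init()
  size_t  _used_bytes_before;
public:
  AllocRegionModel() : _alloc_region(NULL), _count(0), _used_bytes_before(0) {}

  void init() {
    assert(_alloc_region == NULL && _used_bytes_before == 0);
    _alloc_region = &_dummy;
    _count = 0;
  }

  // Re-instate a retained, non-empty region (mirrors G1AllocRegion::set()).
  void set(Region* r) {
    assert(r != NULL && !r->is_empty());
    assert(_alloc_region == &_dummy && _used_bytes_before == 0 && _count == 0);
    _used_bytes_before = r->used_bytes;
    _alloc_region = r;
    _count += 1;
  }

  Region* release() {
    Region* r = _alloc_region;
    _alloc_region = NULL;
    _used_bytes_before = 0;
    return (r == &_dummy) ? NULL : r;  // NULL if nothing was ever set
  }
};

int main() {
  AllocRegionModel m;
  Region retained = { 512 };
  m.init();
  m.set(&retained);              // e.g. the retained old GC alloc region
  assert(m.release() == &retained);
  return 0;
}
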
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp	Tue Aug 16 08:02:29 2011 -0700
@@ -36,7 +36,7 @@
 
 // A class that holds a region that is active in satisfying allocation
 // requests, potentially issued in parallel. When the active region is
-// full it will be retired it replaced with a new one. The
+// full it will be retired and replaced with a new one. The
 // implementation assumes that fast-path allocations will be lock-free
 // and a lock will need to be taken when the active region needs to be
 // replaced.
@@ -57,13 +57,22 @@
   // correct use of init() and release()).
   HeapRegion* _alloc_region;
 
+  // It keeps track of the number of distinct regions that are used
+  // for allocation in the active interval of this object, i.e.,
+  // between a call to init() and a call to release(). The count
+  // includes regions that are freshly allocated, as well as the
+  // region that is re-used via the set() method. This count can
+  // be used in any heuristics that might want to bound how many
+  // distinct regions this object can use during an active interval.
+  size_t _count;
+
   // When we set up a new active region we save its used bytes in this
   // field so that, when we retire it, we can calculate how much space
   // we allocated in it.
   size_t _used_bytes_before;
 
-  // Specifies whether the allocate calls will do BOT updates or not.
-  bool _bot_updates;
+  // When true, indicates that allocate calls should do BOT updates.
+  const bool _bot_updates;
 
   // Useful for debugging and tracing.
   const char* _name;
@@ -127,6 +136,8 @@
     return (_alloc_region == _dummy_region) ? NULL : _alloc_region;
   }
 
+  size_t count() { return _count; }
+
   // The following two are the building blocks for the allocation method.
 
   // First-level allocation: Should be called without holding a
@@ -153,6 +164,12 @@
   // Should be called before we start using this object.
   void init();
 
+  // This can be used to set the active region to a specific
+  // region. (Use case: we try to retain the last old GC alloc
+  // region that we used during a GC and re-instate it, via set(),
+  // at the beginning of the next GC.)
+  void set(HeapRegion* alloc_region);
+
   // Should be called when we want to release the active region which
   // is returned after it's been retired.
   HeapRegion* release();
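
The header comment above promises lock-free fast-path allocation, with a lock taken only when the active region must be replaced. A sketch of the lock-free first level under stand-in types (the real first-/second-level split lives in G1AllocRegion::attempt_allocation() and attempt_allocation_locked(); this is not that code):

#include <atomic>
#include <cstddef>

class BumpRegion {
  static const size_t kCapacityWords = 1024;
  std::atomic<size_t> _top;   // next free word index
  void* _base;                // start of the region's memory
public:
  explicit BumpRegion(void* base) : _top(0), _base(base) {}

  // First-level allocation: safe to call from many threads at once,
  // bumping 'top' with CAS. Assumes one word == one pointer for the sketch.
  void* par_allocate(size_t word_size) {
    size_t old_top = _top.load(std::memory_order_relaxed);
    do {
      if (old_top + word_size > kCapacityWords) {
        return NULL;          // full: caller must take the locked slow path
      }
    } while (!_top.compare_exchange_weak(old_top, old_top + word_size));
    return static_cast<char*>(_base) + old_top * sizeof(void*);
  }
};
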
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -587,26 +587,6 @@
   return res;
 }
 
-HeapRegion* G1CollectedHeap::new_gc_alloc_region(int purpose,
-                                                 size_t word_size) {
-  HeapRegion* alloc_region = NULL;
-  if (_gc_alloc_region_counts[purpose] < g1_policy()->max_regions(purpose)) {
-    alloc_region = new_region(word_size, true /* do_expand */);
-    if (alloc_region != NULL) {
-      if (purpose == GCAllocForSurvived) {
-        _hr_printer.alloc(alloc_region, G1HRPrinter::Survivor);
-        alloc_region->set_survivor();
-      } else {
-        _hr_printer.alloc(alloc_region, G1HRPrinter::Old);
-      }
-      ++_gc_alloc_region_counts[purpose];
-    }
-  } else {
-    g1_policy()->note_alloc_region_limit_reached(purpose);
-  }
-  return alloc_region;
-}
-
 size_t G1CollectedHeap::humongous_obj_allocate_find_first(size_t num_regions,
                                                           size_t word_size) {
   assert(isHumongous(word_size), "word_size should be humongous");
@@ -1091,12 +1071,6 @@
   ShouldNotReachHere();
 }
 
-void G1CollectedHeap::abandon_gc_alloc_regions() {
-  // first, make sure that the GC alloc region list is empty (it should!)
-  assert(_gc_alloc_region_list == NULL, "invariant");
-  release_gc_alloc_regions(true /* totally */);
-}
-
 class PostMCRemSetClearClosure: public HeapRegionClosure {
   ModRefBarrierSet* _mr_bs;
 public:
@@ -1781,7 +1755,11 @@
 void G1CollectedHeap::shrink(size_t shrink_bytes) {
   verify_region_sets_optional();
 
-  release_gc_alloc_regions(true /* totally */);
+  // We should only reach here at the end of a Full GC, which means
+  // we should not be holding on to any GC alloc regions. The method
+  // below will make sure of that and do any remaining clean up.
+  abandon_gc_alloc_regions();
+
   // Instead of tearing down / rebuilding the free lists here, we
   // could instead use the remove_all_pending() method on free_list to
   // remove only the ones that we need to remove.
@@ -1821,6 +1799,7 @@
   _free_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
+  _retained_old_gc_alloc_region(NULL),
   _surviving_young_words(NULL),
   _full_collections_completed(0),
   _in_cset_fast_test(NULL),
@@ -1852,20 +1831,6 @@
     _task_queues->register_queue(i, q);
   }
 
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    _gc_alloc_regions[ap]          = NULL;
-    _gc_alloc_region_counts[ap]    = 0;
-    _retained_gc_alloc_regions[ap] = NULL;
-    // by default, we do not retain a GC alloc region for each ap;
-    // we'll override this, when appropriate, below
-    _retain_gc_alloc_region[ap]    = false;
-  }
-
-  // We will try to remember the last half-full tenured region we
-  // allocated to at the end of a collection so that we can re-use it
-  // during the next collection.
-  _retain_gc_alloc_region[GCAllocForTenured]  = true;
-
   guarantee(_task_queues != NULL, "task_queues allocation failure.");
 }
 
@@ -1901,12 +1866,27 @@
   PermanentGenerationSpec* pgs = collector_policy()->permanent_generation();
   // Includes the perm-gen.
 
-  const size_t total_reserved = max_byte_size + pgs->max_size();
+  // When compressed oops are enabled, the preferred heap base
+  // is calculated by subtracting the requested size from the
+  // 32Gb boundary and using the result as the base address for
+  // heap reservation. If the requested size is not aligned to
+  // HeapRegion::GrainBytes (i.e. the alignment that is passed
+  // into the ReservedHeapSpace constructor) then the actual
+  // base of the reserved heap may end up differing from the
+  // address that was requested (i.e. the preferred heap base).
+  // If this happens then we could end up using a non-optimal
+  // compressed oops mode.
+
+  // Since max_byte_size is aligned to the size of a heap region (checked
+  // above), we also need to align the perm gen size as it might not be.
+  const size_t total_reserved = max_byte_size +
+                                align_size_up(pgs->max_size(), HeapRegion::GrainBytes);
+  Universe::check_alignment(total_reserved, HeapRegion::GrainBytes, "g1 heap and perm");
+
   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 
-  ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
-                        HeapRegion::GrainBytes,
-                        UseLargePages, addr);
+  ReservedHeapSpace heap_rs(total_reserved, HeapRegion::GrainBytes,
+                            UseLargePages, addr);
 
   if (UseCompressedOops) {
     if (addr != NULL && !heap_rs.is_reserved()) {
@@ -1914,14 +1894,17 @@
       // region is taken already, for example, by 'java' launcher.
       // Try again to reserve the heap higher.
       addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
-      ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
-                             UseLargePages, addr);
+
+      ReservedHeapSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
+                                 UseLargePages, addr);
+
       if (addr != NULL && !heap_rs0.is_reserved()) {
         // Failed to reserve at specified address again - give up.
         addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
         assert(addr == NULL, "");
-        ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
-                               UseLargePages, addr);
+
+        ReservedHeapSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
+                                   UseLargePages, addr);
         heap_rs = heap_rs1;
       } else {
         heap_rs = heap_rs0;
@@ -2065,8 +2048,6 @@
   // counts and that mechanism.
   SpecializationStats::clear();
 
-  _gc_alloc_region_list = NULL;
-
   // Do later initialization work for concurrent refinement.
   _cg1r->init();
 
@@ -2187,27 +2168,6 @@
   return blk.result();
 }
 
-#ifndef PRODUCT
-class SumUsedRegionsClosure: public HeapRegionClosure {
-  size_t _num;
-public:
-  SumUsedRegionsClosure() : _num(0) {}
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->continuesHumongous() || r->used() > 0 || r->is_gc_alloc_region()) {
-      _num += 1;
-    }
-    return false;
-  }
-  size_t result() { return _num; }
-};
-
-size_t G1CollectedHeap::recalculate_used_regions() const {
-  SumUsedRegionsClosure blk;
-  heap_region_iterate(&blk);
-  return blk.result();
-}
-#endif // PRODUCT
-
 size_t G1CollectedHeap::unsafe_max_alloc() {
   if (free_regions() > 0) return HeapRegion::GrainBytes;
   // otherwise, is there space in the current allocation region?
@@ -3390,8 +3350,6 @@
       append_secondary_free_list_if_not_empty_with_lock();
     }
 
-    increment_gc_time_stamp();
-
     if (g1_policy()->in_young_gc_mode()) {
       assert(check_young_list_well_formed(),
              "young list should be well formed");
@@ -3402,6 +3360,7 @@
 
       gc_prologue(false);
       increment_total_collections(false /* full gc */);
+      increment_gc_time_stamp();
 
       if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
         HandleMark hm;  // Discard invalid handles created during verification
@@ -3454,7 +3413,7 @@
       if (g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPre();
       }
-      save_marks();
+      perm_gen()->save_marks();
 
       // We must do this before any possible evacuation that should propagate
       // marks.
@@ -3516,8 +3475,8 @@
 
       setup_surviving_young_words();
 
-      // Set up the gc allocation regions.
-      get_gc_alloc_regions();
+      // Initialize the GC alloc regions.
+      init_gc_alloc_regions();
 
       // Actually do the work...
       evacuate_collection_set();
@@ -3562,7 +3521,7 @@
       } else {
         // The "used" of the collection set have already been subtracted
         // when they were freed.  Add in the bytes evacuated.
-        _summary_bytes_used += g1_policy()->bytes_in_to_space();
+        _summary_bytes_used += g1_policy()->bytes_copied_during_gc();
       }
 
       if (g1_policy()->in_young_gc_mode() &&
@@ -3596,6 +3555,29 @@
 
       MemoryService::track_memory_usage();
 
+      // In prepare_for_verify() below we'll need to scan the deferred
+      // update buffers to bring the RSets up-to-date if
+      // G1HRRSFlushLogBuffersOnVerify has been set. While scanning
+      // the update buffers we'll probably need to scan cards on the
+      // regions we just allocated to (i.e., the GC alloc
+      // regions). However, during the last GC we called
+      // set_saved_mark() on all the GC alloc regions, so card
+      // scanning might skip the [saved_mark_word()...top()] area of
+      // those regions (i.e., the area we allocated objects into
+      // during the last GC). But it shouldn't. Given that
+      // saved_mark_word() is conditional on whether the GC time stamp
+      // on the region is current or not, by incrementing the GC time
+      // stamp here we invalidate all the GC time stamps on all the
+      // regions and saved_mark_word() will simply return top() for
+      // all the regions. This is a nicer way of ensuring this rather
+      // than iterating over the regions and fixing them. In fact, the
+      // GC time stamp increment here also ensures that
+      // saved_mark_word() will return top() between pauses, i.e.,
+      // during concurrent refinement. So we don't need the
+      // is_gc_active() check to decide which top to use when
+      // scanning cards (see CR 7039627).
+      increment_gc_time_stamp();
+
       if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
         HandleMark hm;  // Discard invalid handles created during verification
         gclog_or_tty->print(" VerifyAfterGC:");
@@ -3695,252 +3677,50 @@
   assert(_mutator_alloc_region.get() == NULL, "post-condition");
 }
 
-void G1CollectedHeap::set_gc_alloc_region(int purpose, HeapRegion* r) {
-  assert(purpose >= 0 && purpose < GCAllocPurposeCount, "invalid purpose");
-  // make sure we don't call set_gc_alloc_region() multiple times on
-  // the same region
-  assert(r == NULL || !r->is_gc_alloc_region(),
-         "shouldn't already be a GC alloc region");
-  assert(r == NULL || !r->isHumongous(),
-         "humongous regions shouldn't be used as GC alloc regions");
-
-  HeapWord* original_top = NULL;
-  if (r != NULL)
-    original_top = r->top();
-
-  // We will want to record the used space in r as being there before gc.
-  // One we install it as a GC alloc region it's eligible for allocation.
-  // So record it now and use it later.
-  size_t r_used = 0;
-  if (r != NULL) {
-    r_used = r->used();
-
-    if (G1CollectedHeap::use_parallel_gc_threads()) {
-      // need to take the lock to guard against two threads calling
-      // get_gc_alloc_region concurrently (very unlikely but...)
-      MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-      r->save_marks();
-    }
-  }
-  HeapRegion* old_alloc_region = _gc_alloc_regions[purpose];
-  _gc_alloc_regions[purpose] = r;
-  if (old_alloc_region != NULL) {
-    // Replace aliases too.
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      if (_gc_alloc_regions[ap] == old_alloc_region) {
-        _gc_alloc_regions[ap] = r;
-      }
-    }
-  }
-  if (r != NULL) {
-    push_gc_alloc_region(r);
-    if (mark_in_progress() && original_top != r->next_top_at_mark_start()) {
-      // We are using a region as a GC alloc region after it has been used
-      // as a mutator allocation region during the current marking cycle.
-      // The mutator-allocated objects are currently implicitly marked, but
-      // when we move hr->next_top_at_mark_start() forward at the the end
-      // of the GC pause, they won't be.  We therefore mark all objects in
-      // the "gap".  We do this object-by-object, since marking densely
-      // does not currently work right with marking bitmap iteration.  This
-      // means we rely on TLAB filling at the start of pauses, and no
-      // "resuscitation" of filled TLAB's.  If we want to do this, we need
-      // to fix the marking bitmap iteration.
-      HeapWord* curhw = r->next_top_at_mark_start();
-      HeapWord* t = original_top;
-
-      while (curhw < t) {
-        oop cur = (oop)curhw;
-        // We'll assume parallel for generality.  This is rare code.
-        concurrent_mark()->markAndGrayObjectIfNecessary(cur); // can't we just mark them?
-        curhw = curhw + cur->size();
-      }
-      assert(curhw == t, "Should have parsed correctly.");
-    }
-    if (G1PolicyVerbose > 1) {
-      gclog_or_tty->print("New alloc region ["PTR_FORMAT", "PTR_FORMAT", " PTR_FORMAT") "
-                          "for survivors:", r->bottom(), original_top, r->end());
-      r->print();
-    }
-    g1_policy()->record_before_bytes(r_used);
+void G1CollectedHeap::init_gc_alloc_regions() {
+  assert_at_safepoint(true /* should_be_vm_thread */);
+
+  _survivor_gc_alloc_region.init();
+  _old_gc_alloc_region.init();
+  HeapRegion* retained_region = _retained_old_gc_alloc_region;
+  _retained_old_gc_alloc_region = NULL;
+
+  // We will discard the current GC alloc region if:
+  // a) it's in the collection set (it can happen!),
+  // b) it's already full (no point in using it),
+  // c) it's empty (this means that it was emptied during
+  // a cleanup and it should be on the free list now), or
+  // d) it's humongous (this means that it was emptied
+  // during a cleanup and was added to the free list, but
+  // has been subsequently used to allocate a humongous
+  // object that may be less than the region size).
+  if (retained_region != NULL &&
+      !retained_region->in_collection_set() &&
+      !(retained_region->top() == retained_region->end()) &&
+      !retained_region->is_empty() &&
+      !retained_region->isHumongous()) {
+    retained_region->set_saved_mark();
+    _old_gc_alloc_region.set(retained_region);
+    _hr_printer.reuse(retained_region);
   }
 }
 
-void G1CollectedHeap::push_gc_alloc_region(HeapRegion* hr) {
-  assert(Thread::current()->is_VM_thread() ||
-         FreeList_lock->owned_by_self(), "Precondition");
-  assert(!hr->is_gc_alloc_region() && !hr->in_collection_set(),
-         "Precondition.");
-  hr->set_is_gc_alloc_region(true);
-  hr->set_next_gc_alloc_region(_gc_alloc_region_list);
-  _gc_alloc_region_list = hr;
-}
-
-#ifdef G1_DEBUG
-class FindGCAllocRegion: public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    if (r->is_gc_alloc_region()) {
-      gclog_or_tty->print_cr("Region "HR_FORMAT" is still a GC alloc region",
-                             HR_FORMAT_PARAMS(r));
-    }
-    return false;
-  }
-};
-#endif // G1_DEBUG
-
-void G1CollectedHeap::forget_alloc_region_list() {
-  assert_at_safepoint(true /* should_be_vm_thread */);
-  while (_gc_alloc_region_list != NULL) {
-    HeapRegion* r = _gc_alloc_region_list;
-    assert(r->is_gc_alloc_region(), "Invariant.");
-    // We need HeapRegion::oops_on_card_seq_iterate_careful() to work on
-    // newly allocated data in order to be able to apply deferred updates
-    // before the GC is done for verification purposes (i.e to allow
-    // G1HRRSFlushLogBuffersOnVerify). It's safe thing to do after the
-    // collection.
-    r->ContiguousSpace::set_saved_mark();
-    _gc_alloc_region_list = r->next_gc_alloc_region();
-    r->set_next_gc_alloc_region(NULL);
-    r->set_is_gc_alloc_region(false);
-    if (r->is_survivor()) {
-      if (r->is_empty()) {
-        r->set_not_young();
-      } else {
-        _young_list->add_survivor_region(r);
-      }
-    }
-  }
-#ifdef G1_DEBUG
-  FindGCAllocRegion fa;
-  heap_region_iterate(&fa);
-#endif // G1_DEBUG
-}
-
-
-bool G1CollectedHeap::check_gc_alloc_regions() {
-  // TODO: allocation regions check
-  return true;
+void G1CollectedHeap::release_gc_alloc_regions() {
+  _survivor_gc_alloc_region.release();
+  // If we have an old GC alloc region to release, we'll save it in
+  // _retained_old_gc_alloc_region. If we don't,
+  // _retained_old_gc_alloc_region will become NULL. This is what we
+  // want either way, so there is no reason to check explicitly for
+  // either condition.
+  _retained_old_gc_alloc_region = _old_gc_alloc_region.release();
 }
 
-void G1CollectedHeap::get_gc_alloc_regions() {
-  // First, let's check that the GC alloc region list is empty (it should)
-  assert(_gc_alloc_region_list == NULL, "invariant");
-
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    assert(_gc_alloc_regions[ap] == NULL, "invariant");
-    assert(_gc_alloc_region_counts[ap] == 0, "invariant");
-
-    // Create new GC alloc regions.
-    HeapRegion* alloc_region = _retained_gc_alloc_regions[ap];
-    _retained_gc_alloc_regions[ap] = NULL;
-
-    if (alloc_region != NULL) {
-      assert(_retain_gc_alloc_region[ap], "only way to retain a GC region");
-
-      // let's make sure that the GC alloc region is not tagged as such
-      // outside a GC operation
-      assert(!alloc_region->is_gc_alloc_region(), "sanity");
-
-      if (alloc_region->in_collection_set() ||
-          alloc_region->top() == alloc_region->end() ||
-          alloc_region->top() == alloc_region->bottom() ||
-          alloc_region->isHumongous()) {
-        // we will discard the current GC alloc region if
-        // * it's in the collection set (it can happen!),
-        // * it's already full (no point in using it),
-        // * it's empty (this means that it was emptied during
-        // a cleanup and it should be on the free list now), or
-        // * it's humongous (this means that it was emptied
-        // during a cleanup and was added to the free list, but
-        // has been subseqently used to allocate a humongous
-        // object that may be less than the region size).
-
-        alloc_region = NULL;
-      }
-    }
-
-    if (alloc_region == NULL) {
-      // we will get a new GC alloc region
-      alloc_region = new_gc_alloc_region(ap, HeapRegion::GrainWords);
-    } else {
-      // the region was retained from the last collection
-      ++_gc_alloc_region_counts[ap];
-
-      _hr_printer.reuse(alloc_region);
-    }
-
-    if (alloc_region != NULL) {
-      assert(_gc_alloc_regions[ap] == NULL, "pre-condition");
-      set_gc_alloc_region(ap, alloc_region);
-    }
-
-    assert(_gc_alloc_regions[ap] == NULL ||
-           _gc_alloc_regions[ap]->is_gc_alloc_region(),
-           "the GC alloc region should be tagged as such");
-    assert(_gc_alloc_regions[ap] == NULL ||
-           _gc_alloc_regions[ap] == _gc_alloc_region_list,
-           "the GC alloc region should be the same as the GC alloc list head");
-  }
-  // Set alternative regions for allocation purposes that have reached
-  // their limit.
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(ap);
-    if (_gc_alloc_regions[ap] == NULL && alt_purpose != ap) {
-      _gc_alloc_regions[ap] = _gc_alloc_regions[alt_purpose];
-    }
-  }
-  assert(check_gc_alloc_regions(), "alloc regions messed up");
+void G1CollectedHeap::abandon_gc_alloc_regions() {
+  assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
+  assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
+  _retained_old_gc_alloc_region = NULL;
 }
 
-void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
-  // We keep a separate list of all regions that have been alloc regions in
-  // the current collection pause. Forget that now. This method will
-  // untag the GC alloc regions and tear down the GC alloc region
-  // list. It's desirable that no regions are tagged as GC alloc
-  // outside GCs.
-
-  forget_alloc_region_list();
-
-  // The current alloc regions contain objs that have survived
-  // collection. Make them no longer GC alloc regions.
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    _retained_gc_alloc_regions[ap] = NULL;
-    _gc_alloc_region_counts[ap] = 0;
-
-    if (r != NULL) {
-      // we retain nothing on _gc_alloc_regions between GCs
-      set_gc_alloc_region(ap, NULL);
-
-      if (r->is_empty()) {
-        // We didn't actually allocate anything in it; let's just put
-        // it back on the free list.
-        _free_list.add_as_head(r);
-      } else if (_retain_gc_alloc_region[ap] && !totally) {
-        // retain it so that we can use it at the beginning of the next GC
-        _retained_gc_alloc_regions[ap] = r;
-      }
-    }
-  }
-}
-
-#ifndef PRODUCT
-// Useful for debugging
-
-void G1CollectedHeap::print_gc_alloc_regions() {
-  gclog_or_tty->print_cr("GC alloc regions");
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    if (r == NULL) {
-      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT, ap, NULL);
-    } else {
-      gclog_or_tty->print_cr("  %2d : "PTR_FORMAT" "SIZE_FORMAT,
-                             ap, r->bottom(), r->used());
-    }
-  }
-}
-#endif // PRODUCT
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
@@ -3956,8 +3736,6 @@
   _evac_failure_scan_stack = NULL;
 }
 
-
-
 // *** Sequential G1 Evacuation
 
 class G1IsAliveClosure: public BoolObjectClosure {
@@ -4269,136 +4047,32 @@
   }
 }
 
-// *** Parallel G1 Evacuation
-
 HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
                                                   size_t word_size) {
-  assert(!isHumongous(word_size),
-         err_msg("we should not be seeing humongous allocation requests "
-                 "during GC, word_size = "SIZE_FORMAT, word_size));
-
-  HeapRegion* alloc_region = _gc_alloc_regions[purpose];
-  // let the caller handle alloc failure
-  if (alloc_region == NULL) return NULL;
-
-  HeapWord* block = alloc_region->par_allocate(word_size);
-  if (block == NULL) {
-    block = allocate_during_gc_slow(purpose, alloc_region, true, word_size);
-  }
-  return block;
-}
-
-void G1CollectedHeap::retire_alloc_region(HeapRegion* alloc_region,
-                                            bool par) {
-  // Another thread might have obtained alloc_region for the given
-  // purpose, and might be attempting to allocate in it, and might
-  // succeed.  Therefore, we can't do the "finalization" stuff on the
-  // region below until we're sure the last allocation has happened.
-  // We ensure this by allocating the remaining space with a garbage
-  // object.
-  if (par) par_allocate_remaining_space(alloc_region);
-  // Now we can do the post-GC stuff on the region.
-  alloc_region->note_end_of_copying();
-  g1_policy()->record_after_bytes(alloc_region->used());
-  _hr_printer.retire(alloc_region);
-}
-
-HeapWord*
-G1CollectedHeap::allocate_during_gc_slow(GCAllocPurpose purpose,
-                                         HeapRegion*    alloc_region,
-                                         bool           par,
-                                         size_t         word_size) {
-  assert(!isHumongous(word_size),
-         err_msg("we should not be seeing humongous allocation requests "
-                 "during GC, word_size = "SIZE_FORMAT, word_size));
-
-  // We need to make sure we serialize calls to this method. Given
-  // that the FreeList_lock guards accesses to the free_list anyway,
-  // and we need to potentially remove a region from it, we'll use it
-  // to protect the whole call.
-  MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-
-  HeapWord* block = NULL;
-  // In the parallel case, a previous thread to obtain the lock may have
-  // already assigned a new gc_alloc_region.
-  if (alloc_region != _gc_alloc_regions[purpose]) {
-    assert(par, "But should only happen in parallel case.");
-    alloc_region = _gc_alloc_regions[purpose];
-    if (alloc_region == NULL) return NULL;
-    block = alloc_region->par_allocate(word_size);
-    if (block != NULL) return block;
-    // Otherwise, continue; this new region is empty, too.
-  }
-  assert(alloc_region != NULL, "We better have an allocation region");
-  retire_alloc_region(alloc_region, par);
-
-  if (_gc_alloc_region_counts[purpose] >= g1_policy()->max_regions(purpose)) {
-    // Cannot allocate more regions for the given purpose.
-    GCAllocPurpose alt_purpose = g1_policy()->alternative_purpose(purpose);
-    // Is there an alternative?
-    if (purpose != alt_purpose) {
-      HeapRegion* alt_region = _gc_alloc_regions[alt_purpose];
-      // Has not the alternative region been aliased?
-      if (alloc_region != alt_region && alt_region != NULL) {
-        // Try to allocate in the alternative region.
-        if (par) {
-          block = alt_region->par_allocate(word_size);
-        } else {
-          block = alt_region->allocate(word_size);
-        }
-        // Make an alias.
-        _gc_alloc_regions[purpose] = _gc_alloc_regions[alt_purpose];
-        if (block != NULL) {
-          return block;
-        }
-        retire_alloc_region(alt_region, par);
-      }
-      // Both the allocation region and the alternative one are full
-      // and aliased, replace them with a new allocation region.
-      purpose = alt_purpose;
+  if (purpose == GCAllocForSurvived) {
+    HeapWord* result = survivor_attempt_allocation(word_size);
+    if (result != NULL) {
+      return result;
     } else {
-      set_gc_alloc_region(purpose, NULL);
-      return NULL;
+      // Let's try to allocate in the old gen in case we can fit the
+      // object there.
+      return old_attempt_allocation(word_size);
     }
-  }
-
-  // Now allocate a new region for allocation.
-  alloc_region = new_gc_alloc_region(purpose, word_size);
-
-  // let the caller handle alloc failure
-  if (alloc_region != NULL) {
-
-    assert(check_gc_alloc_regions(), "alloc regions messed up");
-    assert(alloc_region->saved_mark_at_top(),
-           "Mark should have been saved already.");
-    // This must be done last: once it's installed, other regions may
-    // allocate in it (without holding the lock.)
-    set_gc_alloc_region(purpose, alloc_region);
-
-    if (par) {
-      block = alloc_region->par_allocate(word_size);
+  } else {
+    assert(purpose == GCAllocForTenured, "sanity");
+    HeapWord* result = old_attempt_allocation(word_size);
+    if (result != NULL) {
+      return result;
     } else {
-      block = alloc_region->allocate(word_size);
+      // Let's try to allocate in the survivors in case we can fit the
+      // object there.
+      return survivor_attempt_allocation(word_size);
     }
-    // Caller handles alloc failure.
-  } else {
-    // This sets other apis using the same old alloc region to NULL, also.
-    set_gc_alloc_region(purpose, NULL);
-  }
-  return block;  // May be NULL.
-}
-
-void G1CollectedHeap::par_allocate_remaining_space(HeapRegion* r) {
-  HeapWord* block = NULL;
-  size_t free_words;
-  do {
-    free_words = r->free()/HeapWordSize;
-    // If there's too little space, no one can allocate, so we're done.
-    if (free_words < CollectedHeap::min_fill_size()) return;
-    // Otherwise, try to claim it.
-    block = r->par_allocate(free_words);
-  } while (block == NULL);
-  fill_with_object(block, free_words);
+  }
+
+  ShouldNotReachHere();
+  // Trying to keep some compilers happy.
+  return NULL;
 }
 
 #ifndef PRODUCT
@@ -4834,6 +4508,7 @@
                                   scan_perm_cl,
                                   i);
     pss.end_strong_roots();
+
     {
       double start = os::elapsedTime();
       G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
@@ -4890,17 +4565,29 @@
                        &eager_scan_code_roots,
                        &buf_scan_perm);
 
-  // Finish up any enqueued closure apps.
+  // Now the ref_processor roots.
+  if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
+    // We need to treat the discovered reference lists as roots and
+    // keep entries (which are added by the marking threads) on them
+    // live until they can be processed at the end of marking.
+    ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
+    ref_processor()->oops_do(&buf_scan_non_heap_roots);
+  }
+
+  // Finish up any enqueued closure apps (attributed as object copy time).
   buf_scan_non_heap_roots.done();
   buf_scan_perm.done();
+
   double ext_roots_end = os::elapsedTime();
+
   g1_policy()->reset_obj_copy_time(worker_i);
-  double obj_copy_time_sec =
-    buf_scan_non_heap_roots.closure_app_seconds() +
-    buf_scan_perm.closure_app_seconds();
+  double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
+                                buf_scan_non_heap_roots.closure_app_seconds();
   g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
+
   double ext_root_time_ms =
     ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
+
   g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
 
   // Scan strong roots in mark stack.
@@ -4910,21 +4597,11 @@
   double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
   g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
 
-  // XXX What should this be doing in the parallel case?
-  g1_policy()->record_collection_pause_end_CH_strong_roots();
   // Now scan the complement of the collection set.
   if (scan_rs != NULL) {
     g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
   }
-  // Finish with the ref_processor roots.
-  if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
-    // We need to treat the discovered reference lists as roots and
-    // keep entries (which are added by the marking threads) on them
-    // live until they can be processed at the end of marking.
-    ref_processor()->weak_oops_do(scan_non_heap_roots);
-    ref_processor()->oops_do(scan_non_heap_roots);
-  }
-  g1_policy()->record_collection_pause_end_G1_strong_roots();
+
   _process_strong_tasks->all_tasks_completed();
 }
 
@@ -4935,24 +4612,6 @@
   SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
 }
 
-
-class SaveMarksClosure: public HeapRegionClosure {
-public:
-  bool doHeapRegion(HeapRegion* r) {
-    r->save_marks();
-    return false;
-  }
-};
-
-void G1CollectedHeap::save_marks() {
-  if (!CollectedHeap::use_parallel_gc_threads()) {
-    SaveMarksClosure sm;
-    heap_region_iterate(&sm);
-  }
-  // We do this even in the parallel case
-  perm_gen()->save_marks();
-}
-
 void G1CollectedHeap::evacuate_collection_set() {
   set_evacuation_failed(false);
 
@@ -4983,10 +4642,6 @@
   double par_time = (os::elapsedTime() - start_par) * 1000.0;
   g1_policy()->record_par_time(par_time);
   set_par_threads(0);
-  // Is this the right thing to do here?  We don't save marks
-  // on individual heap regions when we allocate from
-  // them in parallel, so this seems like the correct place for this.
-  retire_all_alloc_regions();
 
   // Weak root processing.
   // Note: when JSR 292 is enabled and code blobs can contain
@@ -4997,7 +4652,7 @@
     G1KeepAliveClosure keep_alive(this);
     JNIHandles::weak_oops_do(&is_alive, &keep_alive);
   }
-  release_gc_alloc_regions(false /* totally */);
+  release_gc_alloc_regions();
   g1_rem_set()->cleanup_after_oops_into_collection_set_do();
 
   concurrent_g1_refine()->clear_hot_cache();
@@ -5118,68 +4773,31 @@
   }
 }
 
-void G1CollectedHeap::dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list) {
-  while (list != NULL) {
-    guarantee( list->is_young(), "invariant" );
-
-    HeapWord* bottom = list->bottom();
-    HeapWord* end = list->end();
-    MemRegion mr(bottom, end);
-    ct_bs->dirty(mr);
-
-    list = list->get_next_young_region();
-  }
-}
-
-
 class G1ParCleanupCTTask : public AbstractGangTask {
   CardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
   HeapRegion* volatile _su_head;
 public:
   G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
-                     G1CollectedHeap* g1h,
-                     HeapRegion* survivor_list) :
+                     G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
-    _ct_bs(ct_bs),
-    _g1h(g1h),
-    _su_head(survivor_list)
-  { }
+    _ct_bs(ct_bs), _g1h(g1h) { }
 
   void work(int i) {
     HeapRegion* r;
     while (r = _g1h->pop_dirty_cards_region()) {
       clear_cards(r);
     }
-    // Redirty the cards of the survivor regions.
-    dirty_list(&this->_su_head);
   }
 
   void clear_cards(HeapRegion* r) {
-    // Cards for Survivor regions will be dirtied later.
+    // Cards of the survivors should have already been dirtied.
     if (!r->is_survivor()) {
       _ct_bs->clear(MemRegion(r->bottom(), r->end()));
     }
   }
-
-  void dirty_list(HeapRegion* volatile * head_ptr) {
-    HeapRegion* head;
-    do {
-      // Pop region off the list.
-      head = *head_ptr;
-      if (head != NULL) {
-        HeapRegion* r = (HeapRegion*)
-          Atomic::cmpxchg_ptr(head->get_next_young_region(), head_ptr, head);
-        if (r == head) {
-          assert(!r->isHumongous(), "Humongous regions shouldn't be on survivor list");
-          _ct_bs->dirty(MemRegion(r->bottom(), r->end()));
-        }
-      }
-    } while (*head_ptr != NULL);
-  }
 };
 
-
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
@@ -5235,8 +4853,7 @@
   double start = os::elapsedTime();
 
   // Iterate over the dirty cards region list.
-  G1ParCleanupCTTask cleanup_task(ct_bs, this,
-                                  _young_list->first_survivor_region());
+  G1ParCleanupCTTask cleanup_task(ct_bs, this);
 
   if (ParallelGCThreads > 0) {
     set_par_threads(workers()->total_workers());
@@ -5253,10 +4870,6 @@
       }
       r->set_next_dirty_cards_region(NULL);
     }
-    // now, redirty the cards of the survivor regions
-    // (it seemed faster to do it this way, instead of iterating over
-    // all regions and then clearing / dirtying as appropriate)
-    dirtyCardsForYoungRegions(ct_bs, _young_list->first_survivor_region());
   }
 
   double elapsed = os::elapsedTime() - start;
@@ -5483,34 +5096,6 @@
   _young_list->empty_list();
 }
 
-bool G1CollectedHeap::all_alloc_regions_no_allocs_since_save_marks() {
-  bool no_allocs = true;
-  for (int ap = 0; ap < GCAllocPurposeCount && no_allocs; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    no_allocs = r == NULL || r->saved_mark_at_top();
-  }
-  return no_allocs;
-}
-
-void G1CollectedHeap::retire_all_alloc_regions() {
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    HeapRegion* r = _gc_alloc_regions[ap];
-    if (r != NULL) {
-      // Check for aliases.
-      bool has_processed_alias = false;
-      for (int i = 0; i < ap; ++i) {
-        if (_gc_alloc_regions[i] == r) {
-          has_processed_alias = true;
-          break;
-        }
-      }
-      if (!has_processed_alias) {
-        retire_alloc_region(r, false /* par */);
-      }
-    }
-  }
-}
-
 // Done at the start of full GC.
 void G1CollectedHeap::tear_down_region_lists() {
   _free_list.remove_all();
@@ -5565,6 +5150,8 @@
   }
 }
 
+// Methods for the mutator alloc region
+
 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                       bool force) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
@@ -5605,6 +5192,69 @@
   _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
 }
 
+// Methods for the GC alloc regions
+
+HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
+                                                 size_t count,
+                                                 GCAllocPurpose ap) {
+  assert(FreeList_lock->owned_by_self(), "pre-condition");
+
+  if (count < g1_policy()->max_regions(ap)) {
+    HeapRegion* new_alloc_region = new_region(word_size,
+                                              true /* do_expand */);
+    if (new_alloc_region != NULL) {
+      // We really only need to do this for old regions given that we
+      // should never scan survivors. But it doesn't hurt to do it
+      // for survivors too.
+      new_alloc_region->set_saved_mark();
+      if (ap == GCAllocForSurvived) {
+        new_alloc_region->set_survivor();
+        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
+      } else {
+        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Old);
+      }
+      return new_alloc_region;
+    } else {
+      g1_policy()->note_alloc_region_limit_reached(ap);
+    }
+  }
+  return NULL;
+}
+
+void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
+                                             size_t allocated_bytes,
+                                             GCAllocPurpose ap) {
+  alloc_region->note_end_of_copying();
+  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
+  if (ap == GCAllocForSurvived) {
+    young_list()->add_survivor_region(alloc_region);
+  }
+  _hr_printer.retire(alloc_region);
+}
+
+HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
+                                                       bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+}
+
+void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                          size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForSurvived);
+}
+
+HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
+                                                  bool force) {
+  assert(!force, "not supported for GC alloc regions");
+  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+}
+
+void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
+                                     size_t allocated_bytes) {
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
+                               GCAllocForTenured);
+}
 // Heap region set verification
 
 class VerifyRegionListsClosure : public HeapRegionClosure {
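
One detail in the heap-reservation hunk worth a worked example: max_byte_size is region-aligned but pgs->max_size() may not be, so the perm gen size is rounded up with align_size_up() before computing total_reserved, and Universe::check_alignment() then verifies the sum. The usual power-of-two rounding identity, sketched with illustrative sizes (align_up here is a stand-in for HotSpot's align_size_up):

#include <cassert>
#include <cstddef>

// Round x up to the next multiple of a power-of-two alignment.
static size_t align_up(size_t x, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power of two expected");
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t GrainBytes     = 1024 * 1024;             // illustrative region size
  const size_t max_byte_size  = 254 * GrainBytes;        // already region-aligned
  const size_t perm_size      = 20 * GrainBytes + 4096;  // not aligned
  const size_t total_reserved = max_byte_size + align_up(perm_size, GrainBytes);
  // This is what Universe::check_alignment() verifies in the hunk above.
  assert(total_reserved % GrainBytes == 0);
  return 0;
}
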
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Aug 16 08:02:29 2011 -0700
@@ -155,6 +155,24 @@
     : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
 };
 
+class SurvivorGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  SurvivorGCAllocRegion()
+  : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
+};
+
+class OldGCAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  OldGCAllocRegion()
+  : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
+};
+
 class RefineCardTableEntryClosure;
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
@@ -163,6 +181,8 @@
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
   friend class MutatorAllocRegion;
+  friend class SurvivorGCAllocRegion;
+  friend class OldGCAllocRegion;
 
   // Closures used in implementation.
   friend class G1ParCopyHelper;
@@ -225,30 +245,33 @@
   // Alloc region used to satisfy mutator allocation requests.
   MutatorAllocRegion _mutator_alloc_region;
 
+  // Alloc region used to satisfy allocation requests by the GC for
+  // survivor objects.
+  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+
+  // Alloc region used to satisfy allocation requests by the GC for
+  // old objects.
+  OldGCAllocRegion _old_gc_alloc_region;
+
+  // The last old region we allocated to during the last GC.
+  // Typically, it is not full, so we should re-use it during the next GC.
+  HeapRegion* _retained_old_gc_alloc_region;
+
   // It resets the mutator alloc region before new allocations can take place.
   void init_mutator_alloc_region();
 
   // It releases the mutator alloc region.
   void release_mutator_alloc_region();
 
-  void abandon_gc_alloc_regions();
+  // It initializes the GC alloc regions at the start of a GC.
+  void init_gc_alloc_regions();
 
-  // The to-space memory regions into which objects are being copied during
-  // a GC.
-  HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
-  size_t _gc_alloc_region_counts[GCAllocPurposeCount];
-  // These are the regions, one per GCAllocPurpose, that are half-full
-  // at the end of a collection and that we want to reuse during the
-  // next collection.
-  HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
-  // This specifies whether we will keep the last half-full region at
-  // the end of a collection so that it can be reused during the next
-  // collection (this is specified per GCAllocPurpose)
-  bool _retain_gc_alloc_region[GCAllocPurposeCount];
+  // It releases the GC alloc regions at the end of a GC.
+  void release_gc_alloc_regions();
 
-  // A list of the regions that have been set to be alloc regions in the
-  // current collection.
-  HeapRegion* _gc_alloc_region_list;
+  // It does any cleanup that needs to be done on the GC alloc regions
+  // before a Full GC.
+  void abandon_gc_alloc_regions();
 
   // Helper for monitoring and management support.
   G1MonitoringSupport* _g1mm;
@@ -256,20 +279,6 @@
   // Determines PLAB size for a particular allocation purpose.
   static size_t desired_plab_sz(GCAllocPurpose purpose);
 
-  // When called by par thread, requires the FreeList_lock to be held.
-  void push_gc_alloc_region(HeapRegion* hr);
-
-  // This should only be called single-threaded.  Undeclares all GC alloc
-  // regions.
-  void forget_alloc_region_list();
-
-  // Should be used to set an alloc region, because there's other
-  // associated bookkeeping.
-  void set_gc_alloc_region(int purpose, HeapRegion* r);
-
-  // Check well-formedness of alloc region list.
-  bool check_gc_alloc_regions();
-
   // Outside of GC pauses, the number of bytes used in all regions other
   // than the current allocation region.
   size_t _summary_bytes_used;
@@ -387,14 +396,7 @@
 
 protected:
 
-  // Returns "true" iff none of the gc alloc regions have any allocations
-  // since the last call to "save_marks".
-  bool all_alloc_regions_no_allocs_since_save_marks();
-  // Perform finalization stuff on all allocation regions.
-  void retire_all_alloc_regions();
-
-  // The number of regions allocated to hold humongous objects.
-  int         _num_humongous_regions;
+  // The young region list.
   YoungList*  _young_list;
 
   // The current policy object for the collector.
@@ -413,11 +415,6 @@
   // request.
   HeapRegion* new_region(size_t word_size, bool do_expand);
 
-  // Try to allocate a new region to be used for allocation by
-  // a GC thread. It will try to expand the heap if no region is
-  // available.
-  HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
-
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
@@ -525,16 +522,25 @@
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
 
-  // Retires an allocation region when it is full or at the end of a
-  // GC pause.
-  void  retire_alloc_region(HeapRegion* alloc_region, bool par);
+  // Allocation attempt during GC for a survivor object / PLAB.
+  inline HeapWord* survivor_attempt_allocation(size_t word_size);
 
-  // These two methods are the "callbacks" from the G1AllocRegion class.
+  // Allocation attempt during GC for an old object / PLAB.
+  inline HeapWord* old_attempt_allocation(size_t word_size);
 
+  // These methods are the "callbacks" from the G1AllocRegion class.
+
+  // For mutator alloc regions.
   HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
   void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes);
 
+  // For GC alloc regions.
+  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+                                  GCAllocPurpose ap);
+  void retire_gc_alloc_region(HeapRegion* alloc_region,
+                              size_t allocated_bytes, GCAllocPurpose ap);
+
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
@@ -728,9 +734,6 @@
   void g1_process_weak_roots(OopClosure* root_closure,
                              OopClosure* non_root_closure);
 
-  // Invoke "save_marks" on all heap regions.
-  void save_marks();
-
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
@@ -822,24 +825,6 @@
   oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
   void handle_evacuation_failure_common(oop obj, markOop m);
 
-  // Ensure that the relevant gc_alloc regions are set.
-  void get_gc_alloc_regions();
-  // We're done with GC alloc regions. We are going to tear down the
-  // gc alloc list and remove the gc alloc tag from all the regions on
-  // that list. However, we will also retain the last (i.e., the one
-  // that is half-full) GC alloc region, per GCAllocPurpose, for
-  // possible reuse during the next collection, provided
-  // _retain_gc_alloc_region[] indicates that it should be the
-  // case. Said regions are kept in the _retained_gc_alloc_regions[]
-  // array. If the parameter totally is set, we will not retain any
-  // regions, irrespective of what _retain_gc_alloc_region[]
-  // indicates.
-  void release_gc_alloc_regions(bool totally);
-#ifndef PRODUCT
-  // Useful for debugging.
-  void print_gc_alloc_regions();
-#endif // !PRODUCT
-
   // Instance of the concurrent mark is_alive closure for embedding
   // into the reference processor as the is_alive_non_header. This
   // prevents unnecessary additions to the discovered lists during
@@ -948,9 +933,6 @@
   // result might be a bit inaccurate.
   size_t used_unlocked() const;
   size_t recalculate_used() const;
-#ifndef PRODUCT
-  size_t recalculate_used_regions() const;
-#endif // PRODUCT
 
   // These virtual functions do the actual allocation.
   // Some heaps may offer a contiguous region for shared non-blocking
@@ -1110,9 +1092,6 @@
 
   virtual bool is_in_closed_subset(const void* p) const;
 
-  // Dirty card table entries covering a list of young regions.
-  void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
-
   // This resets the card table to all zeros.  It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Aug 16 08:02:29 2011 -0700
@@ -77,6 +77,38 @@
   return result;
 }
 
+inline HeapWord*
+G1CollectedHeap::survivor_attempt_allocation(size_t word_size) {
+  assert(!isHumongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
+                                                      false /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
+                                                      false /* bot_updates */);
+  }
+  if (result != NULL) {
+    dirty_young_block(result, word_size);
+  }
+  return result;
+}
+
+inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
+  assert(!isHumongous(word_size),
+         "we should not be seeing humongous-size allocations in this path");
+
+  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
+                                                       true /* bot_updates */);
+  if (result == NULL) {
+    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
+    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
+                                                       true /* bot_updates */);
+  }
+  return result;
+}
+
 // It dirties the cards that cover the block so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
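
The two attempt-allocation routines added above share a double-checked shape:
an unsynchronized fast-path attempt on the active G1AllocRegion, then a single
retry under FreeList_lock, which serializes installing a replacement region.
A minimal sketch of the pattern, using a hypothetical standalone helper; the
region type, lock, and calls are the ones in the patch:

    // Sketch only, not part of the patch. Fast path first: try the
    // current alloc region without a lock; most allocations succeed here.
    static HeapWord* attempt_allocation_pattern(G1AllocRegion* region,
                                                size_t word_size,
                                                bool bot_updates) {
      HeapWord* result = region->attempt_allocation(word_size, bot_updates);
      if (result == NULL) {
        // Slow path: FreeList_lock serializes retiring the full region
        // and allocating a fresh one from the free list.
        MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
        result = region->attempt_allocation_locked(word_size, bot_updates);
      }
      return result;
    }

The survivor variant additionally dirties the cards covering the new block via
dirty_young_block, while the old variant passes bot_updates = true instead.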
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -134,13 +134,10 @@
 
 G1CollectorPolicy::G1CollectorPolicy() :
   _parallel_gc_threads(G1CollectedHeap::use_parallel_gc_threads()
-    ? ParallelGCThreads : 1),
-
+                        ? ParallelGCThreads : 1),
 
   _n_pauses(0),
-  _recent_CH_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _recent_G1_strong_roots_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
-  _recent_evac_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
+  _recent_rs_scan_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_pause_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_rs_sizes(new TruncatedSeq(NumPrevPausesForHeuristics)),
   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -862,14 +859,6 @@
   calculate_young_list_target_length();
 }
 
-void G1CollectorPolicy::record_before_bytes(size_t bytes) {
-  _bytes_in_to_space_before_gc += bytes;
-}
-
-void G1CollectorPolicy::record_after_bytes(size_t bytes) {
-  _bytes_in_to_space_after_gc += bytes;
-}
-
 void G1CollectorPolicy::record_stop_world_start() {
   _stop_world_start = os::elapsedTime();
 }
@@ -897,9 +886,8 @@
   _pending_cards = _g1->pending_card_num();
   _max_pending_cards = _g1->max_pending_card_num();
 
-  _bytes_in_to_space_before_gc = 0;
-  _bytes_in_to_space_after_gc = 0;
   _bytes_in_collection_set_before_gc = 0;
+  _bytes_copied_during_gc = 0;
 
   YoungList* young_list = _g1->young_list();
   _eden_bytes_before_gc = young_list->eden_used_bytes();
@@ -1050,18 +1038,6 @@
 void G1CollectorPolicy::record_concurrent_pause_end() {
 }
 
-void G1CollectorPolicy::record_collection_pause_end_CH_strong_roots() {
-  _cur_CH_strong_roots_end_sec = os::elapsedTime();
-  _cur_CH_strong_roots_dur_ms =
-    (_cur_CH_strong_roots_end_sec - _cur_collection_start_sec) * 1000.0;
-}
-
-void G1CollectorPolicy::record_collection_pause_end_G1_strong_roots() {
-  _cur_G1_strong_roots_end_sec = os::elapsedTime();
-  _cur_G1_strong_roots_dur_ms =
-    (_cur_G1_strong_roots_end_sec - _cur_CH_strong_roots_end_sec) * 1000.0;
-}
-
 template<class T>
 T sum_of(T* sum_arr, int start, int n, int N) {
   T sum = (T)0;
@@ -1183,7 +1159,6 @@
   double end_time_sec = os::elapsedTime();
   double elapsed_ms = _last_pause_time_ms;
   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
-  double evac_ms = (end_time_sec - _cur_G1_strong_roots_end_sec) * 1000.0;
   size_t rs_size =
     _cur_collection_pause_used_regions_at_start - collection_set_size();
   size_t cur_used_bytes = _g1->used();
@@ -1256,14 +1231,53 @@
 
   _n_pauses++;
 
+  double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
+  double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
+  double update_rs_time = avg_value(_par_last_update_rs_times_ms);
+  double update_rs_processed_buffers =
+    sum_of_values(_par_last_update_rs_processed_buffers);
+  double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
+  double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
+  double termination_time = avg_value(_par_last_termination_times_ms);
+
+  double parallel_known_time = update_rs_time +
+                               ext_root_scan_time +
+                               mark_stack_scan_time +
+                               scan_rs_time +
+                               obj_copy_time +
+                               termination_time;
+
+  double parallel_other_time = _cur_collection_par_time_ms - parallel_known_time;
+
+  PauseSummary* summary = _summary;
+
   if (update_stats) {
-    _recent_CH_strong_roots_times_ms->add(_cur_CH_strong_roots_dur_ms);
-    _recent_G1_strong_roots_times_ms->add(_cur_G1_strong_roots_dur_ms);
-    _recent_evac_times_ms->add(evac_ms);
+    _recent_rs_scan_times_ms->add(scan_rs_time);
     _recent_pause_times_ms->add(elapsed_ms);
-
     _recent_rs_sizes->add(rs_size);
 
+    MainBodySummary* body_summary = summary->main_body_summary();
+    guarantee(body_summary != NULL, "should not be null!");
+
+    if (_satb_drain_time_set) {
+      body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
+    } else {
+      body_summary->record_satb_drain_time_ms(0.0);
+    }
+
+    body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
+    body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
+    body_summary->record_update_rs_time_ms(update_rs_time);
+    body_summary->record_scan_rs_time_ms(scan_rs_time);
+    body_summary->record_obj_copy_time_ms(obj_copy_time);
+    if (parallel) {
+      body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
+      body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
+      body_summary->record_termination_time_ms(termination_time);
+      body_summary->record_parallel_other_time_ms(parallel_other_time);
+    }
+    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
+
     // We exempt parallel collection from this check because Alloc Buffer
     // fragmentation can produce negative collections.  Same with evac
     // failure.
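
The hunk above folds the summary recording into the same update_stats branch
that feeds the recent-pause sequences: each sub-phase time is the average of
the per-worker values, and whatever the known sub-phases do not account for is
attributed to "Parallel Other". A worked example with invented numbers:

    // Hypothetical accounting example; all values in ms.
    double update_rs_time       = 10.0;  // avg over worker threads
    double ext_root_scan_time   =  5.0;
    double mark_stack_scan_time =  0.5;
    double scan_rs_time         = 12.0;
    double obj_copy_time        =  9.0;
    double termination_time     =  1.0;
    double parallel_known_time  = 37.5;  // sum of the six values above
    double cur_collection_par_time_ms = 40.0;  // whole parallel phase
    // 40.0 - 37.5 = 2.5 ms is what the log prints as "Parallel Other".
    double parallel_other_time =
      cur_collection_par_time_ms - parallel_known_time;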
@@ -1328,56 +1341,12 @@
     gclog_or_tty->print_cr("   Recording collection pause(%d)", _n_pauses);
   }
 
-  PauseSummary* summary = _summary;
-
-  double ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
-  double mark_stack_scan_time = avg_value(_par_last_mark_stack_scan_times_ms);
-  double update_rs_time = avg_value(_par_last_update_rs_times_ms);
-  double update_rs_processed_buffers =
-    sum_of_values(_par_last_update_rs_processed_buffers);
-  double scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
-  double obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
-  double termination_time = avg_value(_par_last_termination_times_ms);
-
-  double parallel_other_time = _cur_collection_par_time_ms -
-    (update_rs_time + ext_root_scan_time + mark_stack_scan_time +
-     scan_rs_time + obj_copy_time + termination_time);
-  if (update_stats) {
-    MainBodySummary* body_summary = summary->main_body_summary();
-    guarantee(body_summary != NULL, "should not be null!");
-
-    if (_satb_drain_time_set)
-      body_summary->record_satb_drain_time_ms(_cur_satb_drain_time_ms);
-    else
-      body_summary->record_satb_drain_time_ms(0.0);
-    body_summary->record_ext_root_scan_time_ms(ext_root_scan_time);
-    body_summary->record_mark_stack_scan_time_ms(mark_stack_scan_time);
-    body_summary->record_update_rs_time_ms(update_rs_time);
-    body_summary->record_scan_rs_time_ms(scan_rs_time);
-    body_summary->record_obj_copy_time_ms(obj_copy_time);
-    if (parallel) {
-      body_summary->record_parallel_time_ms(_cur_collection_par_time_ms);
-      body_summary->record_clear_ct_time_ms(_cur_clear_ct_time_ms);
-      body_summary->record_termination_time_ms(termination_time);
-      body_summary->record_parallel_other_time_ms(parallel_other_time);
-    }
-    body_summary->record_mark_closure_time_ms(_mark_closure_time_ms);
-  }
-
   if (G1PolicyVerbose > 1) {
     gclog_or_tty->print_cr("      ET: %10.6f ms           (avg: %10.6f ms)\n"
-                           "        CH Strong: %10.6f ms    (avg: %10.6f ms)\n"
-                           "        G1 Strong: %10.6f ms    (avg: %10.6f ms)\n"
-                           "        Evac:      %10.6f ms    (avg: %10.6f ms)\n"
                            "       ET-RS:  %10.6f ms      (avg: %10.6f ms)\n"
                            "      |RS|: " SIZE_FORMAT,
                            elapsed_ms, recent_avg_time_for_pauses_ms(),
-                           _cur_CH_strong_roots_dur_ms, recent_avg_time_for_CH_strong_ms(),
-                           _cur_G1_strong_roots_dur_ms, recent_avg_time_for_G1_strong_ms(),
-                           evac_ms, recent_avg_time_for_evac_ms(),
-                           scan_rs_time,
-                           recent_avg_time_for_pauses_ms() -
-                           recent_avg_time_for_G1_strong_ms(),
+                           scan_rs_time, recent_avg_time_for_rs_scan_ms(),
                            rs_size);
 
     gclog_or_tty->print_cr("       Used at start: " SIZE_FORMAT"K"
@@ -1438,7 +1407,7 @@
       }
       print_par_stats(2, "GC Worker Times", _par_last_gc_worker_times_ms);
 
-      print_stats(2, "Other", parallel_other_time);
+      print_stats(2, "Parallel Other", parallel_other_time);
       print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
     } else {
       print_stats(1, "Update RS", update_rs_time);
@@ -1600,8 +1569,8 @@
 
     double survival_ratio = 0.0;
     if (_bytes_in_collection_set_before_gc > 0) {
-      survival_ratio = (double) bytes_in_to_space_during_gc() /
-        (double) _bytes_in_collection_set_before_gc;
+      survival_ratio = (double) _bytes_copied_during_gc /
+                       (double) _bytes_in_collection_set_before_gc;
     }
 
     _pending_cards_seq->add((double) _pending_cards);
@@ -1967,38 +1936,27 @@
 }
 
 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
-  if (_recent_pause_times_ms->num() == 0) return (double) MaxGCPauseMillis;
-  else return _recent_pause_times_ms->avg();
-}
-
-double G1CollectorPolicy::recent_avg_time_for_CH_strong_ms() {
-  if (_recent_CH_strong_roots_times_ms->num() == 0)
-    return (double)MaxGCPauseMillis/3.0;
-  else return _recent_CH_strong_roots_times_ms->avg();
+  if (_recent_pause_times_ms->num() == 0) {
+    return (double) MaxGCPauseMillis;
+  }
+  return _recent_pause_times_ms->avg();
 }
 
-double G1CollectorPolicy::recent_avg_time_for_G1_strong_ms() {
-  if (_recent_G1_strong_roots_times_ms->num() == 0)
+double G1CollectorPolicy::recent_avg_time_for_rs_scan_ms() {
+  if (_recent_rs_scan_times_ms->num() == 0) {
     return (double)MaxGCPauseMillis/3.0;
-  else return _recent_G1_strong_roots_times_ms->avg();
-}
-
-double G1CollectorPolicy::recent_avg_time_for_evac_ms() {
-  if (_recent_evac_times_ms->num() == 0) return (double)MaxGCPauseMillis/3.0;
-  else return _recent_evac_times_ms->avg();
+  }
+  return _recent_rs_scan_times_ms->avg();
 }
 
 int G1CollectorPolicy::number_of_recent_gcs() {
-  assert(_recent_CH_strong_roots_times_ms->num() ==
-         _recent_G1_strong_roots_times_ms->num(), "Sequence out of sync");
-  assert(_recent_G1_strong_roots_times_ms->num() ==
-         _recent_evac_times_ms->num(), "Sequence out of sync");
-  assert(_recent_evac_times_ms->num() ==
+  assert(_recent_rs_scan_times_ms->num() ==
          _recent_pause_times_ms->num(), "Sequence out of sync");
   assert(_recent_pause_times_ms->num() ==
          _recent_CS_bytes_used_before->num(), "Sequence out of sync");
   assert(_recent_CS_bytes_used_before->num() ==
          _recent_CS_bytes_surviving->num(), "Sequence out of sync");
+
   return _recent_pause_times_ms->num();
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Aug 16 08:02:29 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -129,15 +129,9 @@
   jlong  _num_cc_clears;                // number of times the card count cache has been cleared
 #endif
 
-  double _cur_CH_strong_roots_end_sec;
-  double _cur_CH_strong_roots_dur_ms;
-  double _cur_G1_strong_roots_end_sec;
-  double _cur_G1_strong_roots_dur_ms;
+  // Statistics for recent GC pauses.  See below for how indexed.
+  TruncatedSeq* _recent_rs_scan_times_ms;
 
-  // Statistics for recent GC pauses.  See below for how indexed.
-  TruncatedSeq* _recent_CH_strong_roots_times_ms;
-  TruncatedSeq* _recent_G1_strong_roots_times_ms;
-  TruncatedSeq* _recent_evac_times_ms;
   // These exclude marking times.
   TruncatedSeq* _recent_pause_times_ms;
   TruncatedSeq* _recent_gc_times_ms;
@@ -591,13 +585,9 @@
   int _last_update_rs_processed_buffers;
   double _last_pause_time_ms;
 
-  size_t _bytes_in_to_space_before_gc;
-  size_t _bytes_in_to_space_after_gc;
-  size_t bytes_in_to_space_during_gc() {
-    return
-      _bytes_in_to_space_after_gc - _bytes_in_to_space_before_gc;
-  }
   size_t _bytes_in_collection_set_before_gc;
+  size_t _bytes_copied_during_gc;
+
   // Used to count used bytes in CS.
   friend class CountCSClosure;
 
@@ -692,17 +682,11 @@
   // The average time in ms per collection pause, averaged over recent pauses.
   double recent_avg_time_for_pauses_ms();
 
-  // The average time in ms for processing CollectedHeap strong roots, per
-  // collection pause, averaged over recent pauses.
-  double recent_avg_time_for_CH_strong_ms();
-
-  // The average time in ms for processing the G1 remembered set, per
-  // pause, averaged over recent pauses.
-  double recent_avg_time_for_G1_strong_ms();
-
-  // The average time in ms for "evacuating followers", per pause, averaged
-  // over recent pauses.
-  double recent_avg_time_for_evac_ms();
+  // The average time in ms for RS scanning, per pause, averaged
+  // over recent pauses. (Note the RS scanning time for a pause
+  // is itself an average of the RS scanning time for each worker
+  // thread.)
+  double recent_avg_time_for_rs_scan_ms();
 
   // The number of "recent" GCs recorded in the number sequences
   int number_of_recent_gcs();
@@ -817,10 +801,6 @@
     return _bytes_in_collection_set_before_gc;
   }
 
-  size_t bytes_in_to_space() {
-    return bytes_in_to_space_during_gc();
-  }
-
   unsigned calc_gc_alloc_time_stamp() {
     return _all_pause_times_ms->num() + 1;
   }
@@ -887,9 +867,6 @@
   virtual void record_concurrent_pause();
   virtual void record_concurrent_pause_end();
 
-  virtual void record_collection_pause_end_CH_strong_roots();
-  virtual void record_collection_pause_end_G1_strong_roots();
-
   virtual void record_collection_pause_end();
   void print_heap_transition();
 
@@ -992,9 +969,16 @@
   }
 #endif
 
-  // Record the fact that "bytes" bytes allocated in a region.
-  void record_before_bytes(size_t bytes);
-  void record_after_bytes(size_t bytes);
+  // Record how much space we copied during a GC. This is typically
+  // called when a GC alloc region is being retired.
+  void record_bytes_copied_during_gc(size_t bytes) {
+    _bytes_copied_during_gc += bytes;
+  }
+
+  // The amount of space we copied during a GC.
+  size_t bytes_copied_during_gc() {
+    return _bytes_copied_during_gc;
+  }
 
   // Choose a new collection set.  Marks the chosen regions as being
   // "in_collection_set", and links them together.  The head and number of
@@ -1208,10 +1192,6 @@
     return purpose == GCAllocForSurvived;
   }
 
-  inline GCAllocPurpose alternative_purpose(int purpose) {
-    return GCAllocForTenured;
-  }
-
   static const size_t REGIONS_UNLIMITED = ~(size_t)0;
 
   size_t max_regions(int purpose);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -352,7 +352,6 @@
          "we should have already filtered out humongous regions");
 
   _in_collection_set = false;
-  _is_gc_alloc_region = false;
 
   set_young_index_in_cset(-1);
   uninstall_surv_rate_group();
@@ -486,7 +485,7 @@
   : G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
     _next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
     _humongous_type(NotHumongous), _humongous_start_region(NULL),
-    _in_collection_set(false), _is_gc_alloc_region(false),
+    _in_collection_set(false),
     _next_in_special_set(NULL), _orig_end(NULL),
     _claimed(InitialClaimValue), _evacuation_failed(false),
     _prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
@@ -716,8 +715,6 @@
   }
   if (in_collection_set())
     st->print(" CS");
-  else if (is_gc_alloc_region())
-    st->print(" A ");
   else
     st->print("   ");
   if (is_young())
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Aug 16 08:02:29 2011 -0700
@@ -251,10 +251,6 @@
   // True iff the region is in current collection_set.
   bool _in_collection_set;
 
-  // Is this or has it been an allocation region in the current collection
-  // pause.
-  bool _is_gc_alloc_region;
-
   // True iff an attempt to evacuate an object in the region failed.
   bool _evacuation_failed;
 
@@ -497,27 +493,6 @@
     _next_in_special_set = r;
   }
 
-  // True iff it is or has been an allocation region in the current
-  // collection pause.
-  bool is_gc_alloc_region() const {
-    return _is_gc_alloc_region;
-  }
-  void set_is_gc_alloc_region(bool b) {
-    _is_gc_alloc_region = b;
-  }
-  HeapRegion* next_gc_alloc_region() {
-    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
-    assert(_next_in_special_set == NULL ||
-           _next_in_special_set->is_gc_alloc_region(),
-           "Malformed CS.");
-    return _next_in_special_set;
-  }
-  void set_next_gc_alloc_region(HeapRegion* r) {
-    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
-    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
-    _next_in_special_set = r;
-  }
-
   // Methods used by the HeapRegionSetBase class and subclasses.
 
   // Getter and setter for the next field used to link regions into
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -364,7 +364,10 @@
   PosParPRT** next_addr() { return &_next; }
 
   bool should_expand(int tid) {
-    return par_tables() == NULL && tid > 0 && hr()->is_gc_alloc_region();
+    // Given that we now defer RSet updates until after a GC, we no
+    // longer really need to expand the tables. This code should be
+    // cleaned up in the future (see CR 6921087).
+    return false;
   }
 
   void par_expand() {
--- a/hotspot/src/share/vm/runtime/virtualspace.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/runtime/virtualspace.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,7 @@
   assert(len >= required_size, "len too small");
 
   const size_t s = size_t(addr);
-  const size_t beg_ofs = s + prefix_size & suffix_align - 1;
+  const size_t beg_ofs = (s + prefix_size) & (suffix_align - 1);
   const size_t beg_delta = beg_ofs == 0 ? 0 : suffix_align - beg_ofs;
 
   if (len < beg_delta + required_size) {
@@ -113,8 +113,8 @@
     assert(res >= raw, "alignment decreased start addr");
     assert(res + prefix_size + suffix_size <= raw + reserve_size,
            "alignment increased end addr");
-    assert((res & prefix_align - 1) == 0, "bad alignment of prefix");
-    assert((res + prefix_size & suffix_align - 1) == 0,
+    assert((res & (prefix_align - 1)) == 0, "bad alignment of prefix");
+    assert(((res + prefix_size) & (suffix_align - 1)) == 0,
            "bad alignment of suffix");
   }
 #endif
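
The parenthesization changes in this file are behavior-preserving: in C++ the
additive operators bind more tightly than bitwise '&', so the old expressions
already grouped as intended, and the parentheses only make the mask
computation explicit and quiet -Wparentheses-style warnings. A self-contained
illustration with arbitrary values:

    size_t s = 0x12345000, prefix_size = 0x1000, suffix_align = 0x10000;
    const size_t a = s + prefix_size & suffix_align - 1;      // implicit grouping
    const size_t b = (s + prefix_size) & (suffix_align - 1);  // explicit grouping
    assert(a == b, "'+' and '-' bind tighter than '&'");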
@@ -135,7 +135,7 @@
     assert(UseCompressedOops, "currently requested address used only for compressed oops");
     if (PrintCompressedOopsMode) {
       tty->cr();
-      tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
+      tty->print_cr("Reserved memory not at requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
     }
     // OS ignored requested address. Try different address.
     if (special) {
@@ -162,11 +162,11 @@
   assert(prefix_align != 0, "sanity");
   assert(suffix_size != 0, "sanity");
   assert(suffix_align != 0, "sanity");
-  assert((prefix_size & prefix_align - 1) == 0,
+  assert((prefix_size & (prefix_align - 1)) == 0,
     "prefix_size not divisible by prefix_align");
-  assert((suffix_size & suffix_align - 1) == 0,
+  assert((suffix_size & (suffix_align - 1)) == 0,
     "suffix_size not divisible by suffix_align");
-  assert((suffix_align & prefix_align - 1) == 0,
+  assert((suffix_align & (prefix_align - 1)) == 0,
     "suffix_align not divisible by prefix_align");
 
   // Assert that if noaccess_prefix is used, it is the same as prefix_align.
@@ -210,8 +210,8 @@
   if (addr == NULL) return;
 
   // Check whether the result has the needed alignment (unlikely unless
-  // prefix_align == suffix_align).
-  const size_t ofs = size_t(addr) + adjusted_prefix_size & suffix_align - 1;
+  // prefix_align < suffix_align).
+  const size_t ofs = (size_t(addr) + adjusted_prefix_size) & (suffix_align - 1);
   if (ofs != 0) {
     // Wrong alignment.  Release, allocate more space and do manual alignment.
     //
@@ -232,6 +232,15 @@
       addr = reserve_and_align(size + suffix_align, adjusted_prefix_size,
                                prefix_align, suffix_size, suffix_align);
     }
+
+    if (requested_address != 0 &&
+        failed_to_reserve_as_requested(addr, requested_address, size, false)) {
+      // As a result of the alignment constraints, the allocated addr differs
+      // from the requested address. Return to the caller, who can
+      // take remedial action (like try again without a requested address).
+      assert(_base == NULL, "should be");
+      return;
+    }
   }
 
   _base = addr;
@@ -245,13 +254,19 @@
                                const size_t noaccess_prefix,
                                bool executable) {
   const size_t granularity = os::vm_allocation_granularity();
-  assert((size & granularity - 1) == 0,
+  assert((size & (granularity - 1)) == 0,
          "size not aligned to os::vm_allocation_granularity()");
-  assert((alignment & granularity - 1) == 0,
+  assert((alignment & (granularity - 1)) == 0,
          "alignment not aligned to os::vm_allocation_granularity()");
   assert(alignment == 0 || is_power_of_2((intptr_t)alignment),
          "not a power of 2");
 
+  alignment = MAX2(alignment, (size_t)os::vm_page_size());
+
+  // Assert that if noaccess_prefix is used, it is the same as alignment.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == alignment, "noaccess prefix wrong");
+
   _base = NULL;
   _size = 0;
   _special = false;
@@ -282,10 +297,8 @@
         return;
       }
       // Check alignment constraints
-      if (alignment > 0) {
-        assert((uintptr_t) base % alignment == 0,
-               "Large pages returned a non-aligned address");
-      }
+      assert((uintptr_t) base % alignment == 0,
+             "Large pages returned a non-aligned address");
       _special = true;
     } else {
       // failed; try to reserve regular memory below
@@ -321,7 +334,7 @@
     if (base == NULL) return;
 
     // Check alignment constraints
-    if (alignment > 0 && ((size_t)base & alignment - 1) != 0) {
+    if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
       // Base not aligned, retry
       if (!os::release_memory(base, size)) fatal("os::release_memory failed");
       // Reserve size large enough to do manual alignment and
@@ -338,12 +351,21 @@
         os::release_memory(extra_base, extra_size);
         base = os::reserve_memory(size, base);
       } while (base == NULL);
+
+      if (requested_address != 0 &&
+          failed_to_reserve_as_requested(base, requested_address, size, false)) {
+        // As a result of the alignment constraints, the allocated base differs
+        // from the requested address. Return to the caller, who can
+        // take remedial action (like try again without a requested address).
+        assert(_base == NULL, "should be");
+        return;
+      }
     }
   }
   // Done
   _base = base;
   _size = size;
-  _alignment = MAX2(alignment, (size_t) os::vm_page_size());
+  _alignment = alignment;
   _noaccess_prefix = noaccess_prefix;
 
   // Assert that if noaccess_prefix is used, it is the same as alignment.
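
Hoisting the MAX2 clamp to the top of the reservation routine means every
later check, including the new noaccess-prefix assertion and the base
alignment test, sees the effective alignment rather than the raw request, and
_alignment can be stored at the end without re-clamping. Roughly, with a
hypothetical 1K request on a 4K-page system:

    size_t alignment = 1 * K;                                 // caller's request
    alignment = MAX2(alignment, (size_t)os::vm_page_size());  // effective: 4K
    // From here on, both the noaccess_prefix == alignment assertion and
    // ((size_t)base + noaccess_prefix) & (alignment - 1) are evaluated
    // against the clamped value.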
--- a/hotspot/src/share/vm/services/gcNotifier.cpp	Thu Jul 28 14:10:21 2011 -0400
+++ b/hotspot/src/share/vm/services/gcNotifier.cpp	Tue Aug 16 08:02:29 2011 -0700
@@ -92,7 +92,6 @@
                           &args,
                           CHECK_NH);
   return Handle(THREAD,(oop)result.get_jobject());
-
 }
 
 static Handle createGcInfo(GCMemoryManager *gcManager, GCStatInfo *gcStatInfo,TRAPS) {
@@ -100,9 +99,16 @@
   // Fill the arrays of MemoryUsage objects with before and after GC
   // per pool memory usage
 
-  klassOop muKlass = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);   objArrayOop bu = oopFactory::new_objArray( muKlass,MemoryService::num_memory_pools(), CHECK_NH);
+  klassOop mu_klass = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
+  instanceKlassHandle mu_kh(THREAD, mu_klass);
+
+  // The array allocations below must use a handle containing mu_klass
+  // because the first allocation could trigger a GC, causing the
+  // actual klass oop to move and leaving mu_klass pointing to its old
+  // location.
+  objArrayOop bu = oopFactory::new_objArray(mu_kh(), MemoryService::num_memory_pools(), CHECK_NH);
   objArrayHandle usage_before_gc_ah(THREAD, bu);
-  objArrayOop au = oopFactory::new_objArray(muKlass,MemoryService::num_memory_pools(), CHECK_NH);
+  objArrayOop au = oopFactory::new_objArray(mu_kh(), MemoryService::num_memory_pools(), CHECK_NH);
   objArrayHandle usage_after_gc_ah(THREAD, au);
 
   for (int i = 0; i < MemoryService::num_memory_pools(); i++) {
@@ -126,7 +132,7 @@
   // The type is 'I'
   objArrayOop extra_args_array = oopFactory::new_objArray(SystemDictionary::Integer_klass(), 1, CHECK_NH);
   objArrayHandle extra_array (THREAD, extra_args_array);
-  klassOop itKlass= SystemDictionary::Integer_klass();
+  klassOop itKlass = SystemDictionary::Integer_klass();
   instanceKlassHandle intK(THREAD, itKlass);
 
   instanceHandle extra_arg_val = intK->allocate_instance_handle(CHECK_NH);
@@ -147,7 +153,7 @@
   extra_array->obj_at_put(0,extra_arg_val());
 
   klassOop gcInfoklass = Management::com_sun_management_GcInfo_klass(CHECK_NH);
-  instanceKlassHandle ik (THREAD,gcInfoklass);
+  instanceKlassHandle ik(THREAD, gcInfoklass);
 
   Handle gcInfo_instance = ik->allocate_instance_handle(CHECK_NH);
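
The mu_klass fix above illustrates a general HotSpot rule: a raw oop (here a
klassOop) must not stay live across anything that can allocate, because the
allocation may trigger a GC that moves the object, whereas a Handle is updated
by the GC. Schematically, with len standing in for
MemoryService::num_memory_pools():

    // Broken shape: mu_klass is a raw oop. The first new_objArray call
    // may GC and move the klass, leaving mu_klass stale for the second.
    klassOop mu_klass = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
    objArrayOop bad = oopFactory::new_objArray(mu_klass, len, CHECK_NH);

    // Fixed shape: the handle is updated by the GC, so mu_kh() is
    // always the current location of the klass.
    instanceKlassHandle mu_kh(THREAD, mu_klass);
    objArrayOop ok1 = oopFactory::new_objArray(mu_kh(), len, CHECK_NH);
    objArrayOop ok2 = oopFactory::new_objArray(mu_kh(), len, CHECK_NH);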
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/7072527/TestFullGCCount.java	Tue Aug 16 08:02:29 2011 -0700
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestFullGCCount.java
+ * @bug 7072527
+ * @summary CMS: JMM GC counters overcount in some cases
+ * @run main/othervm -XX:+UseConcMarkSweepGC TestFullGCCount
+ *
+ */
+import java.util.*;
+import java.lang.management.*;
+
+public class TestFullGCCount {
+
+    public String collectorName = "ConcurrentMarkSweep";
+
+    public static void main(String[] args) {
+        TestFullGCCount t;
+        if (args.length == 2) {
+            t = new TestFullGCCount(args[0], args[1]);
+        } else {
+            t = new TestFullGCCount();
+        }
+        System.out.println("Monitoring collector: " + t.collectorName);
+        t.run();
+    }
+
+    public TestFullGCCount(String pool, String collector) {
+        // Only the collector name is monitored; the pool name is
+        // currently unused.
+        collectorName = collector;
+    }
+
+    public TestFullGCCount() {
+    }
+
+    public void run() {
+        int iterations = 20;
+        long[] counts = new long[iterations];
+        boolean diffAlways2 = true; // assume we will fail
+
+        for (int i = 0; i < iterations; i++) {
+            System.gc();
+            counts[i] = getCollectionCount();
+            if (i > 0 && counts[i] - counts[i - 1] != 2) {
+                diffAlways2 = false;
+            }
+        }
+        if (diffAlways2) {
+            throw new RuntimeException("FAILED: System.gc() is incrementing the collection count twice per call.");
+        }
+        System.out.println("Passed.");
+    }
+
+    private long getCollectionCount() {
+        long count = 0;
+        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
+        for (GarbageCollectorMXBean collector : collectors) {
+            String name = collector.getName();
+            if (name.contains(collectorName)) {
+                System.out.println(name + ": collection count = "
+                                   + collector.getCollectionCount());
+                count = collector.getCollectionCount();
+            }
+        }
+        return count;
+    }
+
+}
+