Merge
author jmasa
Wed, 02 Jul 2014 17:24:18 -0700
changeset 25488 b908ec9bf56d
parent 25367 3924abbe7bc9 (current diff)
parent 25487 cc7635939d8a (diff)
child 25489 feb54edc509d
Merge
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp
hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp
hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp
hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegion.java	Wed Jul 02 17:24:18 2014 -0700
@@ -24,23 +24,26 @@
 
 package sun.jvm.hotspot.gc_implementation.g1;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Observable;
 import java.util.Observer;
-
 import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.memory.ContiguousSpace;
+import sun.jvm.hotspot.memory.CompactibleSpace;
+import sun.jvm.hotspot.memory.MemRegion;
 import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.types.AddressField;
 import sun.jvm.hotspot.types.CIntegerField;
 import sun.jvm.hotspot.types.Type;
 import sun.jvm.hotspot.types.TypeDataBase;
 
 // Mirror class for HeapRegion. Currently we don't actually include
-// any of its fields but only iterate over it (which we get "for free"
-// as HeapRegion ultimately inherits from ContiguousSpace).
+// any of its fields but only iterate over it.
 
-public class HeapRegion extends ContiguousSpace {
+public class HeapRegion extends CompactibleSpace {
     // static int GrainBytes;
     static private CIntegerField grainBytesField;
+    static private AddressField topField;
 
     static {
         VM.registerVMInitializedObserver(new Observer() {
@@ -54,6 +57,8 @@
         Type type = db.lookupType("HeapRegion");
 
         grainBytesField = type.getCIntegerField("GrainBytes");
+        topField = type.getAddressField("_top");
+
     }
 
     static public long grainBytes() {
@@ -63,4 +68,25 @@
     public HeapRegion(Address addr) {
         super(addr);
     }
+
+    public Address top() {
+        return topField.getValue(addr);
+    }
+
+    @Override
+    public List getLiveRegions() {
+        List res = new ArrayList();
+        res.add(new MemRegion(bottom(), top()));
+        return res;
+    }
+
+    @Override
+    public long used() {
+        return top().minus(bottom());
+    }
+
+    @Override
+    public long free() {
+        return end().minus(top());
+    }
 }
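
The three new accessors mirror the VM-side space accounting that HeapRegion no
longer inherits once it stops being a ContiguousSpace: getLiveRegions() reports
the single live chunk [bottom, top), and used()/free() are the distances around
_top. A minimal standalone C++ sketch of that invariant (illustrative names,
not the real hotspot declarations):

    // Illustration only: the real HeapRegion carries far more state.
    #include <cstddef>

    struct RegionSketch {
      char* _bottom;   // first word of the region
      char* _top;      // next free word; surfaced to the SA as "_top"
      char* _end;      // one past the last word of the region

      size_t used() const { return (size_t)(_top - _bottom); }  // top().minus(bottom())
      size_t free() const { return (size_t)(_end - _top); }     // end().minus(top())
    };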
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -488,7 +488,7 @@
       while(iter.next()) {
         if (iter.type() == relocInfo::virtual_call_type) {
           if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
-            CompiledIC *ic = CompiledIC_at(iter.reloc());
+            CompiledIC *ic = CompiledIC_at(&iter);
             if (TraceCompiledIC) {
               tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
               ic->print();
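
Handing the factory the live RelocIterator lets it read iter->type() and
iter->addr() directly instead of first materializing a Relocation via
iter.reloc(). A condensed sketch of the call-site shape above (tracing and
error handling elided; assumes an nmethod* nm in scope):

    // Sketch; assumes nm is the nmethod being scanned.
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type &&
          CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
        CompiledIC* ic = CompiledIC_at(&iter);  // pass the iterator itself
        // ... inspect or release ic->cached_icholder() ...
      }
    }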
--- a/hotspot/src/share/vm/code/compiledIC.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/code/compiledIC.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -159,10 +159,24 @@
 //-----------------------------------------------------------------------------
 // High-level access to an inline cache. Guaranteed to be MT-safe.
 
+void CompiledIC::initialize_from_iter(RelocIterator* iter) {
+  assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call");
+
+  if (iter->type() == relocInfo::virtual_call_type) {
+    virtual_call_Relocation* r = iter->virtual_call_reloc();
+    _is_optimized = false;
+    _value = nativeMovConstReg_at(r->cached_value());
+  } else {
+    assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
+    _is_optimized = true;
+    _value = NULL;
+  }
+}
+
 CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
   : _ic_call(call)
 {
-  address ic_call = call->instruction_address();
+  address ic_call = _ic_call->instruction_address();
 
   assert(ic_call != NULL, "ic_call address must be set");
   assert(nm != NULL, "must pass nmethod");
@@ -173,15 +187,21 @@
   bool ret = iter.next();
   assert(ret == true, "relocInfo must exist at this address");
   assert(iter.addr() == ic_call, "must find ic_call");
-  if (iter.type() == relocInfo::virtual_call_type) {
-    virtual_call_Relocation* r = iter.virtual_call_reloc();
-    _is_optimized = false;
-    _value = nativeMovConstReg_at(r->cached_value());
-  } else {
-    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
-    _is_optimized = true;
-    _value = NULL;
-  }
+
+  initialize_from_iter(&iter);
+}
+
+CompiledIC::CompiledIC(RelocIterator* iter)
+  : _ic_call(nativeCall_at(iter->addr()))
+{
+  address ic_call = _ic_call->instruction_address();
+
+  nmethod* nm = iter->code();
+  assert(ic_call != NULL, "ic_call address must be set");
+  assert(nm != NULL, "must pass nmethod");
+  assert(nm->contains(ic_call), "must be in nmethod");
+
+  initialize_from_iter(iter);
 }
 
 bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
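
Both constructors now funnel into initialize_from_iter(), whose precondition is
that the iterator already sits on the IC's call instruction; the relocation
type then picks the IC's shape (a plain virtual call keeps its cached value in
a NativeMovConstReg, an optimized one has none). A short usage sketch under
that assumption:

    // Sketch: nm assumed in scope; the iterator must sit on a call site.
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type ||
          iter.type() == relocInfo::opt_virtual_call_type) {
        CompiledIC* ic = CompiledIC_at(&iter);  // asserts the type, then verifies
        if (!ic->is_optimized()) {
          // _value came from nativeMovConstReg_at(r->cached_value())
        }
      }
    }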
--- a/hotspot/src/share/vm/code/compiledIC.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/code/compiledIC.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -150,6 +150,9 @@
   bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)
 
   CompiledIC(nmethod* nm, NativeCall* ic_call);
+  CompiledIC(RelocIterator* iter);
+
+  void initialize_from_iter(RelocIterator* iter);
 
   static bool is_icholder_entry(address entry);
 
@@ -183,6 +186,7 @@
   friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
   friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
   friend CompiledIC* CompiledIC_at(Relocation* call_site);
+  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
 
   // This is used to release CompiledICHolder*s from nmethods that
   // are about to be freed.  The callsite might contain other stale
@@ -263,6 +267,13 @@
   return c_ic;
 }
 
+inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
+  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
+      reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
+  CompiledIC* c_ic = new CompiledIC(reloc_iter);
+  c_ic->verify();
+  return c_ic;
+}
 
 //-----------------------------------------------------------------------------
 // The CompiledStaticCall represents a call to a static method in the compiled
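
Because the constructors stay non-public, the checked factory above is the only
way to build a CompiledIC from an iterator: it asserts the relocation type and
then verifies the new object. A reduced, self-contained illustration of that
idiom (hypothetical names, not the hotspot sources):

    #include <cassert>

    enum RelocType { virtual_call, opt_virtual_call, other_reloc };
    struct Iter { RelocType type; };

    class IC {
      explicit IC(Iter*) {}          // private: callers must go through IC_at()
      friend IC* IC_at(Iter* it);
    public:
      void verify() {}               // stand-in for CompiledIC::verify()
    };

    inline IC* IC_at(Iter* it) {
      assert(it->type == virtual_call || it->type == opt_virtual_call);
      IC* ic = new IC(it);           // only reachable after the type check
      ic->verify();                  // fail fast on a malformed call site
      return ic;
    }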
--- a/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/code/nmethod.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -1146,7 +1146,7 @@
     switch(iter.type()) {
       case relocInfo::virtual_call_type:
       case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        CompiledIC *ic = CompiledIC_at(&iter);
         // Ok, to lookup references to zombies here
         CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
         if( cb != NULL && cb->is_nmethod() ) {
@@ -1632,7 +1632,7 @@
     RelocIterator iter(this, low_boundary);
     while(iter.next()) {
       if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           // The only exception is compiledICHolder oops which may
           // yet be marked below. (We check this further below).
@@ -1741,7 +1741,7 @@
     // compiled code is maintaining a link to dead metadata.
     address static_call_addr = NULL;
     if (iter.type() == relocInfo::opt_virtual_call_type) {
-      CompiledIC* cic = CompiledIC_at(iter.reloc());
+      CompiledIC* cic = CompiledIC_at(&iter);
       if (!cic->is_call_to_interpreted()) {
         static_call_addr = iter.addr();
       }
@@ -1793,7 +1793,7 @@
         }
       } else if (iter.type() == relocInfo::virtual_call_type) {
         // Check compiledIC holders associated with this nmethod
-        CompiledIC *ic = CompiledIC_at(iter.reloc());
+        CompiledIC *ic = CompiledIC_at(&iter);
         if (ic->is_icholder_call()) {
           CompiledICHolder* cichk = ic->cached_icholder();
           f(cichk->holder_method());
@@ -2922,7 +2922,7 @@
     case relocInfo::virtual_call_type:
     case relocInfo::opt_virtual_call_type: {
       VerifyMutexLocker mc(CompiledIC_lock);
-      CompiledIC_at(iter.reloc())->print();
+      CompiledIC_at(&iter)->print();
       break;
     }
     case relocInfo::static_call_type:
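
All five call sites in this file now share the same shape. A hypothetical
helper (not part of this change) that makes the pattern explicit, using only
calls visible in the hunks above:

    // Hypothetical sketch: visit every inline-cache call site in an nmethod.
    template <typename F>
    static void for_each_compiled_ic(nmethod* nm, address low_boundary, F f) {
      RelocIterator iter(nm, low_boundary);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type ||
            iter.type() == relocInfo::opt_virtual_call_type) {
          f(CompiledIC_at(&iter));  // the iterator supplies nmethod and address
        }
      }
    }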
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1354 +0,0 @@
-/*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/shared/gcStats.hpp"
-#include "memory/defNewGeneration.hpp"
-#include "memory/genCollectedHeap.hpp"
-#include "runtime/thread.hpp"
-#ifdef TARGET_OS_FAMILY_linux
-# include "os_linux.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_solaris
-# include "os_solaris.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_windows
-# include "os_windows.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_aix
-# include "os_aix.inline.hpp"
-#endif
-#ifdef TARGET_OS_FAMILY_bsd
-# include "os_bsd.inline.hpp"
-#endif
-elapsedTimer CMSAdaptiveSizePolicy::_concurrent_timer;
-elapsedTimer CMSAdaptiveSizePolicy::_STW_timer;
-
-// Defined if the granularity of the time measurements is potentially too large.
-#define CLOCK_GRANULARITY_TOO_LARGE
-
-CMSAdaptiveSizePolicy::CMSAdaptiveSizePolicy(size_t init_eden_size,
-                                             size_t init_promo_size,
-                                             size_t init_survivor_size,
-                                             double max_gc_minor_pause_sec,
-                                             double max_gc_pause_sec,
-                                             uint gc_cost_ratio) :
-  AdaptiveSizePolicy(init_eden_size,
-                     init_promo_size,
-                     init_survivor_size,
-                     max_gc_pause_sec,
-                     gc_cost_ratio) {
-
-  clear_internal_time_intervals();
-
-  _processor_count = os::active_processor_count();
-
-  if (CMSConcurrentMTEnabled && (ConcGCThreads > 1)) {
-    assert(_processor_count > 0, "Processor count is suspect");
-    _concurrent_processor_count = MIN2((uint) ConcGCThreads,
-                                       (uint) _processor_count);
-  } else {
-    _concurrent_processor_count = 1;
-  }
-
-  _avg_concurrent_time  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_concurrent_interval = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_concurrent_gc_cost = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  _avg_initial_pause    = new AdaptivePaddedAverage(AdaptiveTimeWeight,
-                                                    PausePadding);
-  _avg_remark_pause     = new AdaptivePaddedAverage(AdaptiveTimeWeight,
-                                                    PausePadding);
-
-  _avg_cms_STW_time     = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_cms_STW_gc_cost  = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  _avg_cms_free         = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_cms_free_at_sweep = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_cms_promo        = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  // Mark-sweep-compact
-  _avg_msc_pause        = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_msc_interval     = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_msc_gc_cost      = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  // Mark-sweep
-  _avg_ms_pause = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_ms_interval      = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-  _avg_ms_gc_cost       = new AdaptiveWeightedAverage(AdaptiveTimeWeight);
-
-  // Variables that estimate pause times as a function of generation
-  // size.
-  _remark_pause_old_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-  _initial_pause_old_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-  _remark_pause_young_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-  _initial_pause_young_estimator =
-    new LinearLeastSquareFit(AdaptiveSizePolicyWeight);
-
-  // Alignment comes from that used in ReservedSpace.
-  _generation_alignment = os::vm_allocation_granularity();
-
-  // Start the concurrent timer here so that the first
-  // concurrent_phases_begin() measures a finite mutator
-  // time.  A finite mutator time is used to determine
-  // if a concurrent collection has been started.  If this
-  // proves to be a problem, use some explicit flag to
-  // signal that a concurrent collection has been started.
-  _concurrent_timer.start();
-  _STW_timer.start();
-}
-
-double CMSAdaptiveSizePolicy::concurrent_processor_fraction() {
-  // For now assume no other daemon threads are taking away
-  // CPUs from the application.
-  return ((double) _concurrent_processor_count / (double) _processor_count);
-}
-
-double CMSAdaptiveSizePolicy::concurrent_collection_cost(
-                                                  double interval_in_seconds) {
-  //  When the precleaning and sweeping phases use multiple
-  // threads, change one_processor_fraction to
-  // concurrent_processor_fraction().
-  double one_processor_fraction = 1.0 / ((double) processor_count());
-  double concurrent_cost =
-    collection_cost(_latest_cms_concurrent_marking_time_secs,
-                interval_in_seconds) * concurrent_processor_fraction() +
-    collection_cost(_latest_cms_concurrent_precleaning_time_secs,
-                interval_in_seconds) * one_processor_fraction +
-    collection_cost(_latest_cms_concurrent_sweeping_time_secs,
-                interval_in_seconds) * one_processor_fraction;
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "\nCMSAdaptiveSizePolicy::scaled_concurrent_collection_cost(%f) "
-      "_latest_cms_concurrent_marking_cost %f "
-      "_latest_cms_concurrent_precleaning_cost %f "
-      "_latest_cms_concurrent_sweeping_cost %f "
-      "concurrent_processor_fraction %f "
-      "concurrent_cost %f ",
-      interval_in_seconds,
-      collection_cost(_latest_cms_concurrent_marking_time_secs,
-        interval_in_seconds),
-      collection_cost(_latest_cms_concurrent_precleaning_time_secs,
-        interval_in_seconds),
-      collection_cost(_latest_cms_concurrent_sweeping_time_secs,
-        interval_in_seconds),
-      concurrent_processor_fraction(),
-      concurrent_cost);
-  }
-  return concurrent_cost;
-}
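
A worked example of this cost model with made-up numbers: 4 processors, 2
concurrent GC threads, a 10 s collection interval, 1.0 s of marking, and 0.2 s
each of precleaning and sweeping:

    concurrent_processor_fraction = 2 / 4 = 0.50
    one_processor_fraction        = 1 / 4 = 0.25
    concurrent_cost = (1.0/10)*0.50 + (0.2/10)*0.25 + (0.2/10)*0.25
                    = 0.050 + 0.005 + 0.005
                    = 0.06           (6% of the interval's CPU capacity)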
-
-double CMSAdaptiveSizePolicy::concurrent_collection_time() {
-  double latest_cms_sum_concurrent_phases_time_secs =
-    _latest_cms_concurrent_marking_time_secs +
-    _latest_cms_concurrent_precleaning_time_secs +
-    _latest_cms_concurrent_sweeping_time_secs;
-  return latest_cms_sum_concurrent_phases_time_secs;
-}
-
-double CMSAdaptiveSizePolicy::scaled_concurrent_collection_time() {
-  //  When the precleaning and sweeping phases use multiple
-  // threads, change one_processor_fraction to
-  // concurrent_processor_fraction().
-  double one_processor_fraction = 1.0 / ((double) processor_count());
-  double latest_cms_sum_concurrent_phases_time_secs =
-    _latest_cms_concurrent_marking_time_secs * concurrent_processor_fraction() +
-    _latest_cms_concurrent_precleaning_time_secs * one_processor_fraction +
-    _latest_cms_concurrent_sweeping_time_secs * one_processor_fraction ;
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "\nCMSAdaptiveSizePolicy::scaled_concurrent_collection_time "
-      "_latest_cms_concurrent_marking_time_secs %f "
-      "_latest_cms_concurrent_precleaning_time_secs %f "
-      "_latest_cms_concurrent_sweeping_time_secs %f "
-      "concurrent_processor_fraction %f "
-      "latest_cms_sum_concurrent_phases_time_secs %f ",
-      _latest_cms_concurrent_marking_time_secs,
-      _latest_cms_concurrent_precleaning_time_secs,
-      _latest_cms_concurrent_sweeping_time_secs,
-      concurrent_processor_fraction(),
-      latest_cms_sum_concurrent_phases_time_secs);
-  }
-  return latest_cms_sum_concurrent_phases_time_secs;
-}
-
-void CMSAdaptiveSizePolicy::update_minor_pause_old_estimator(
-    double minor_pause_in_ms) {
-  // Get the equivalent of the free space
-  // that is available for promotions in the CMS generation
-  // and use that to update _minor_pause_old_estimator
-
-  // Don't implement this until it is needed. A warning is
-  // printed if _minor_pause_old_estimator is used.
-}
-
-void CMSAdaptiveSizePolicy::concurrent_marking_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": concurrent_marking_begin ");
-  }
-  //  Update the interval time
-  _concurrent_timer.stop();
-  _latest_cms_collection_end_to_collection_start_secs = _concurrent_timer.seconds();
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_marking_begin: "
-    "mutator time %f", _latest_cms_collection_end_to_collection_start_secs);
-  }
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::concurrent_marking_end() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_marking_end()");
-  }
-
-  _concurrent_timer.stop();
-  _latest_cms_concurrent_marking_time_secs = _concurrent_timer.seconds();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("\n CMSAdaptiveSizePolicy::concurrent_marking_end"
-      ":concurrent marking time (s) %f",
-      _latest_cms_concurrent_marking_time_secs);
-  }
-}
-
-void CMSAdaptiveSizePolicy::concurrent_precleaning_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::concurrent_precleaning_begin()");
-  }
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-}
-
-
-void CMSAdaptiveSizePolicy::concurrent_precleaning_end() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_precleaning_end()");
-  }
-
-  _concurrent_timer.stop();
-  // May be set again by a second call during the same collection.
-  _latest_cms_concurrent_precleaning_time_secs = _concurrent_timer.seconds();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("\n CMSAdaptiveSizePolicy::concurrent_precleaning_end"
-      ":concurrent precleaning time (s) %f",
-      _latest_cms_concurrent_precleaning_time_secs);
-  }
-}
-
-void CMSAdaptiveSizePolicy::concurrent_sweeping_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::concurrent_sweeping_begin()");
-  }
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-}
-
-
-void CMSAdaptiveSizePolicy::concurrent_sweeping_end() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_sweeping_end()");
-  }
-
-  _concurrent_timer.stop();
-  _latest_cms_concurrent_sweeping_time_secs = _concurrent_timer.seconds();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("\n CMSAdaptiveSizePolicy::concurrent_sweeping_end"
-      ":concurrent sweeping time (s) %f",
-      _latest_cms_concurrent_sweeping_time_secs);
-  }
-}
-
-void CMSAdaptiveSizePolicy::concurrent_phases_end(GCCause::Cause gc_cause,
-                                                  size_t cur_eden,
-                                                  size_t cur_promo) {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": concurrent_phases_end ");
-  }
-
-  // Update the concurrent timer
-  _concurrent_timer.stop();
-
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-
-    avg_cms_free()->sample(cur_promo);
-    double latest_cms_sum_concurrent_phases_time_secs =
-      concurrent_collection_time();
-
-    _avg_concurrent_time->sample(latest_cms_sum_concurrent_phases_time_secs);
-
-    // Cost of collection (unit-less)
-
-    // Total interval for collection.  May not be valid.  Tests
-    // below determine whether to use this.
-    //
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::concurrent_phases_end \n"
-        "_latest_cms_reset_end_to_initial_mark_start_secs %f \n"
-        "_latest_cms_initial_mark_start_to_end_time_secs %f \n"
-        "_latest_cms_remark_start_to_end_time_secs %f \n"
-        "_latest_cms_concurrent_marking_time_secs %f \n"
-        "_latest_cms_concurrent_precleaning_time_secs %f \n"
-        "_latest_cms_concurrent_sweeping_time_secs %f \n"
-        "latest_cms_sum_concurrent_phases_time_secs %f \n"
-        "_latest_cms_collection_end_to_collection_start_secs %f \n"
-        "concurrent_processor_fraction %f",
-        _latest_cms_reset_end_to_initial_mark_start_secs,
-        _latest_cms_initial_mark_start_to_end_time_secs,
-        _latest_cms_remark_start_to_end_time_secs,
-        _latest_cms_concurrent_marking_time_secs,
-        _latest_cms_concurrent_precleaning_time_secs,
-        _latest_cms_concurrent_sweeping_time_secs,
-        latest_cms_sum_concurrent_phases_time_secs,
-        _latest_cms_collection_end_to_collection_start_secs,
-        concurrent_processor_fraction());
-    }
-    double interval_in_seconds =
-      _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs +
-      latest_cms_sum_concurrent_phases_time_secs +
-      _latest_cms_collection_end_to_collection_start_secs;
-    assert(interval_in_seconds >= 0.0,
-      "Bad interval between cms collections");
-
-    // Sample for performance counter
-    avg_concurrent_interval()->sample(interval_in_seconds);
-
-    // STW costs (initial and remark pauses)
-    // Cost of collection (unit-less)
-    assert(_latest_cms_initial_mark_start_to_end_time_secs >= 0.0,
-      "Bad initial mark pause");
-    assert(_latest_cms_remark_start_to_end_time_secs >= 0.0,
-      "Bad remark pause");
-    double STW_time_in_seconds =
-      _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs;
-    double STW_collection_cost = 0.0;
-    if (interval_in_seconds > 0.0) {
-      // cost for the STW phases of the concurrent collection.
-      STW_collection_cost = STW_time_in_seconds / interval_in_seconds;
-      avg_cms_STW_gc_cost()->sample(STW_collection_cost);
-    }
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("cmsAdaptiveSizePolicy::STW_collection_end: "
-        "STW gc cost: %f  average: %f", STW_collection_cost,
-        avg_cms_STW_gc_cost()->average());
-      gclog_or_tty->print_cr("  STW pause: %f (ms) STW period %f (ms)",
-        (double) STW_time_in_seconds * MILLIUNITS,
-        (double) interval_in_seconds * MILLIUNITS);
-    }
-
-    double concurrent_cost = 0.0;
-    if (latest_cms_sum_concurrent_phases_time_secs > 0.0) {
-      concurrent_cost = concurrent_collection_cost(interval_in_seconds);
-
-      avg_concurrent_gc_cost()->sample(concurrent_cost);
-      // Average this ms cost into all the other types of gc costs
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print("cmsAdaptiveSizePolicy::concurrent_phases_end: "
-          "concurrent gc cost: %f  average: %f",
-          concurrent_cost,
-          _avg_concurrent_gc_cost->average());
-        gclog_or_tty->print_cr("  concurrent time: %f (ms) cms period %f (ms)"
-          " processor fraction: %f",
-          latest_cms_sum_concurrent_phases_time_secs * MILLIUNITS,
-          interval_in_seconds * MILLIUNITS,
-          concurrent_processor_fraction());
-      }
-    }
-    double total_collection_cost = STW_collection_cost + concurrent_cost;
-    avg_major_gc_cost()->sample(total_collection_cost);
-
-    // Gather information for estimating future behavior
-    double initial_pause_in_ms = _latest_cms_initial_mark_start_to_end_time_secs * MILLIUNITS;
-    double remark_pause_in_ms = _latest_cms_remark_start_to_end_time_secs * MILLIUNITS;
-
-    double cur_promo_size_in_mbytes = ((double)cur_promo)/((double)M);
-    initial_pause_old_estimator()->update(cur_promo_size_in_mbytes,
-      initial_pause_in_ms);
-    remark_pause_old_estimator()->update(cur_promo_size_in_mbytes,
-      remark_pause_in_ms);
-    major_collection_estimator()->update(cur_promo_size_in_mbytes,
-      total_collection_cost);
-
-    // This estimate uses the average eden size.  It could also
-    // have used the latest eden size.  Which is better?
-    double cur_eden_size_in_mbytes = ((double)cur_eden)/((double) M);
-    initial_pause_young_estimator()->update(cur_eden_size_in_mbytes,
-      initial_pause_in_ms);
-    remark_pause_young_estimator()->update(cur_eden_size_in_mbytes,
-      remark_pause_in_ms);
-  }
-
-  clear_internal_time_intervals();
-
-  set_first_after_collection();
-
-  // The concurrent phase keeps track of its own mutator interval
-  // with this timer.  This allows the stop-the-world phase to
-  // be included in the mutator time so that the stop-the-world time
-  // is not double counted.  Reset and start it.
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-
-  // The mutator time between STW phases does not include the
-  // concurrent collection time.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_initial_begin() {
-  //  Update the interval time
-  _STW_timer.stop();
-  _latest_cms_reset_end_to_initial_mark_start_secs = _STW_timer.seconds();
-  // Reset for the initial mark
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_initial_end(
-    GCCause::Cause gc_cause) {
-  _STW_timer.stop();
-
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-    _latest_cms_initial_mark_start_to_end_time_secs = _STW_timer.seconds();
-    avg_initial_pause()->sample(_latest_cms_initial_mark_start_to_end_time_secs);
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print(
-        "cmsAdaptiveSizePolicy::checkpoint_roots_initial_end: "
-        "initial pause: %f ", _latest_cms_initial_mark_start_to_end_time_secs);
-    }
-  }
-
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_final_begin() {
-  _STW_timer.stop();
-  _latest_cms_initial_mark_end_to_remark_start_secs = _STW_timer.seconds();
-  // Start accumulating time for the remark in the STW timer.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::checkpoint_roots_final_end(
-    GCCause::Cause gc_cause) {
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-    // Total initial mark pause + remark pause.
-    _latest_cms_remark_start_to_end_time_secs = _STW_timer.seconds();
-    double STW_time_in_seconds = _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs;
-    double STW_time_in_ms = STW_time_in_seconds * MILLIUNITS;
-
-    avg_remark_pause()->sample(_latest_cms_remark_start_to_end_time_secs);
-
-    // Sample total for initial mark + remark
-    avg_cms_STW_time()->sample(STW_time_in_seconds);
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("cmsAdaptiveSizePolicy::checkpoint_roots_final_end: "
-        "remark pause: %f", _latest_cms_remark_start_to_end_time_secs);
-    }
-
-  }
-  // Don't start the STW times here because the concurrent
-  // sweep and reset have not happened yet.  Even so, start the
-  // STW timer, because it is used by ms_collection_begin() and
-  // ms_collection_end() to get the sweep time if a MS is being
-  // done in the foreground.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::msc_collection_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": msc_collection_begin ");
-  }
-  _STW_timer.stop();
-  _latest_cms_msc_end_to_msc_start_time_secs = _STW_timer.seconds();
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::msc_collection_begin: "
-      "mutator time %f",
-      _latest_cms_msc_end_to_msc_start_time_secs);
-  }
-  avg_msc_interval()->sample(_latest_cms_msc_end_to_msc_start_time_secs);
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::msc_collection_end(GCCause::Cause gc_cause) {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": msc_collection_end ");
-  }
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-        UseAdaptiveSizePolicyWithSystemGC) {
-    double msc_pause_in_seconds = _STW_timer.seconds();
-    if ((_latest_cms_msc_end_to_msc_start_time_secs > 0.0) &&
-        (msc_pause_in_seconds > 0.0)) {
-      avg_msc_pause()->sample(msc_pause_in_seconds);
-      double mutator_time_in_seconds = 0.0;
-      if (_latest_cms_collection_end_to_collection_start_secs == 0.0) {
-        // This assertion may fail because of time stamp granularity.
-        // Comment it out and investigate it at a later time.  The large
-        // time stamp granularity occurs on some older linux systems.
-#ifndef CLOCK_GRANULARITY_TOO_LARGE
-        assert((_latest_cms_concurrent_marking_time_secs == 0.0) &&
-               (_latest_cms_concurrent_precleaning_time_secs == 0.0) &&
-               (_latest_cms_concurrent_sweeping_time_secs == 0.0),
-          "There should not be any concurrent time");
-#endif
-        // A concurrent collection did not start.  Mutator time
-        // between collections comes from the STW MSC timer.
-        mutator_time_in_seconds = _latest_cms_msc_end_to_msc_start_time_secs;
-      } else {
-        // The concurrent collection did start so count the mutator
-        // time to the start of the concurrent collection.  In this
-        // case the _latest_cms_msc_end_to_msc_start_time_secs measures
-        // the time between the initial mark or remark and the
-        // start of the MSC.  That has no real meaning.
-        mutator_time_in_seconds = _latest_cms_collection_end_to_collection_start_secs;
-      }
-
-      double latest_cms_sum_concurrent_phases_time_secs =
-        concurrent_collection_time();
-      double interval_in_seconds =
-        mutator_time_in_seconds +
-        _latest_cms_initial_mark_start_to_end_time_secs +
-        _latest_cms_remark_start_to_end_time_secs +
-        latest_cms_sum_concurrent_phases_time_secs +
-        msc_pause_in_seconds;
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr("  interval_in_seconds %f \n"
-          "     mutator_time_in_seconds %f \n"
-          "     _latest_cms_initial_mark_start_to_end_time_secs %f\n"
-          "     _latest_cms_remark_start_to_end_time_secs %f\n"
-          "     latest_cms_sum_concurrent_phases_time_secs %f\n"
-          "     msc_pause_in_seconds %f\n",
-          interval_in_seconds,
-          mutator_time_in_seconds,
-          _latest_cms_initial_mark_start_to_end_time_secs,
-          _latest_cms_remark_start_to_end_time_secs,
-          latest_cms_sum_concurrent_phases_time_secs,
-          msc_pause_in_seconds);
-      }
-
-      // The concurrent cost is wasted cost but it should be
-      // included.
-      double concurrent_cost = concurrent_collection_cost(interval_in_seconds);
-
-      // Initial mark and remark, also wasted.
-      double STW_time_in_seconds = _latest_cms_initial_mark_start_to_end_time_secs +
-        _latest_cms_remark_start_to_end_time_secs;
-      double STW_collection_cost =
-        collection_cost(STW_time_in_seconds, interval_in_seconds) +
-        concurrent_cost;
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr(" msc_collection_end:\n"
-          "_latest_cms_collection_end_to_collection_start_secs %f\n"
-          "_latest_cms_msc_end_to_msc_start_time_secs %f\n"
-          "_latest_cms_initial_mark_start_to_end_time_secs %f\n"
-          "_latest_cms_remark_start_to_end_time_secs %f\n"
-          "latest_cms_sum_concurrent_phases_time_secs %f\n",
-          _latest_cms_collection_end_to_collection_start_secs,
-          _latest_cms_msc_end_to_msc_start_time_secs,
-          _latest_cms_initial_mark_start_to_end_time_secs,
-          _latest_cms_remark_start_to_end_time_secs,
-          latest_cms_sum_concurrent_phases_time_secs);
-
-        gclog_or_tty->print_cr(" msc_collection_end: \n"
-          "latest_cms_sum_concurrent_phases_time_secs %f\n"
-          "STW_time_in_seconds %f\n"
-          "msc_pause_in_seconds %f\n",
-          latest_cms_sum_concurrent_phases_time_secs,
-          STW_time_in_seconds,
-          msc_pause_in_seconds);
-      }
-
-      double cost = concurrent_cost + STW_collection_cost +
-        collection_cost(msc_pause_in_seconds, interval_in_seconds);
-
-      _avg_msc_gc_cost->sample(cost);
-
-      // Average this ms cost into all the other types of gc costs
-      avg_major_gc_cost()->sample(cost);
-
-      // Sample for performance counter
-      _avg_msc_interval->sample(interval_in_seconds);
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print("cmsAdaptiveSizePolicy::msc_collection_end: "
-          "MSC gc cost: %f  average: %f", cost,
-          _avg_msc_gc_cost->average());
-
-        double msc_pause_in_ms = msc_pause_in_seconds * MILLIUNITS;
-        gclog_or_tty->print_cr("  MSC pause: %f (ms) MSC period %f (ms)",
-          msc_pause_in_ms, (double) interval_in_seconds * MILLIUNITS);
-      }
-    }
-  }
-
-  clear_internal_time_intervals();
-
-  // Can this call be put into the epilogue?
-  set_first_after_collection();
-
-  // The concurrent phase keeps track of its own mutator interval
-  // with this timer.  This allows the stop-the-world phase to
-  // be included in the mutator time so that the stop-the-world time
-  // is not double counted.  Reset and start it.
-  _concurrent_timer.stop();
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_begin() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": ms_collection_begin ");
-  }
-  _STW_timer.stop();
-  _latest_cms_ms_end_to_ms_start = _STW_timer.seconds();
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::ms_collection_begin: "
-      "mutator time %f",
-      _latest_cms_ms_end_to_ms_start);
-  }
-  avg_ms_interval()->sample(_STW_timer.seconds());
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_end(GCCause::Cause gc_cause) {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print(" ");
-    gclog_or_tty->stamp();
-    gclog_or_tty->print(": ms_collection_end ");
-  }
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-        UseAdaptiveSizePolicyWithSystemGC) {
-    // The MS collection is a foreground collection that does all
-    // the parts of a mostly concurrent collection.
-    //
-    // For this collection include the cost of the
-    //  initial mark
-    //  remark
-    //  all concurrent time (scaled down by the
-    //    concurrent_processor_fraction).  Some
-    //    may be zero if the baton was passed before
-    //    it was reached.
-    //    concurrent marking
-    //    sweeping
-    //    resetting
-    //  STW after baton was passed (STW_in_foreground_in_seconds)
-    double STW_in_foreground_in_seconds = _STW_timer.seconds();
-
-    double latest_cms_sum_concurrent_phases_time_secs =
-      concurrent_collection_time();
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("\nCMSAdaptiveSizePolicy::ms_collection_end "
-        "STW_in_foreground_in_seconds %f "
-        "_latest_cms_initial_mark_start_to_end_time_secs %f "
-        "_latest_cms_remark_start_to_end_time_secs %f "
-        "latest_cms_sum_concurrent_phases_time_secs %f "
-        "_latest_cms_ms_marking_start_to_end_time_secs %f "
-        "_latest_cms_ms_end_to_ms_start %f",
-        STW_in_foreground_in_seconds,
-        _latest_cms_initial_mark_start_to_end_time_secs,
-        _latest_cms_remark_start_to_end_time_secs,
-        latest_cms_sum_concurrent_phases_time_secs,
-        _latest_cms_ms_marking_start_to_end_time_secs,
-        _latest_cms_ms_end_to_ms_start);
-    }
-
-    double STW_marking_in_seconds = _latest_cms_initial_mark_start_to_end_time_secs +
-      _latest_cms_remark_start_to_end_time_secs;
-#ifndef CLOCK_GRANULARITY_TOO_LARGE
-    assert(_latest_cms_ms_marking_start_to_end_time_secs == 0.0 ||
-           latest_cms_sum_concurrent_phases_time_secs == 0.0,
-           "marking done twice?");
-#endif
-    double ms_time_in_seconds = STW_marking_in_seconds +
-      STW_in_foreground_in_seconds +
-      _latest_cms_ms_marking_start_to_end_time_secs +
-      scaled_concurrent_collection_time();
-    avg_ms_pause()->sample(ms_time_in_seconds);
-    // Use the STW costs from the initial mark and remark plus
-    // the cost of the concurrent phase to calculate a
-    // collection cost.
-    double cost = 0.0;
-    if ((_latest_cms_ms_end_to_ms_start > 0.0) &&
-        (ms_time_in_seconds > 0.0)) {
-      double interval_in_seconds =
-        _latest_cms_ms_end_to_ms_start + ms_time_in_seconds;
-
-      if (PrintAdaptiveSizePolicy && Verbose) {
-        gclog_or_tty->print_cr("\n ms_time_in_seconds  %f  "
-          "latest_cms_sum_concurrent_phases_time_secs %f  "
-          "interval_in_seconds %f",
-          ms_time_in_seconds,
-          latest_cms_sum_concurrent_phases_time_secs,
-          interval_in_seconds);
-      }
-
-      cost = collection_cost(ms_time_in_seconds, interval_in_seconds);
-
-      _avg_ms_gc_cost->sample(cost);
-      // Average this ms cost into all the other types of gc costs
-      avg_major_gc_cost()->sample(cost);
-
-      // Sample for performance counter
-      _avg_ms_interval->sample(interval_in_seconds);
-    }
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print("cmsAdaptiveSizePolicy::ms_collection_end: "
-        "MS gc cost: %f  average: %f", cost, _avg_ms_gc_cost->average());
-
-      double ms_time_in_ms = ms_time_in_seconds * MILLIUNITS;
-      gclog_or_tty->print_cr("  MS pause: %f (ms) MS period %f (ms)",
-        ms_time_in_ms,
-        _latest_cms_ms_end_to_ms_start * MILLIUNITS);
-    }
-  }
-
-  // Consider putting this code (here to end) into a
-  // method for convenience.
-  clear_internal_time_intervals();
-
-  set_first_after_collection();
-
-  // The concurrent phase keeps track of its own mutator interval
-  // with this timer.  This allows the stop-the-world phase to
-  // be included in the mutator time so that the stop-the-world time
-  // is not double counted.  Reset and start it.
-  _concurrent_timer.stop();
-  _concurrent_timer.reset();
-  _concurrent_timer.start();
-
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::clear_internal_time_intervals() {
-  _latest_cms_reset_end_to_initial_mark_start_secs = 0.0;
-  _latest_cms_initial_mark_end_to_remark_start_secs = 0.0;
-  _latest_cms_collection_end_to_collection_start_secs = 0.0;
-  _latest_cms_concurrent_marking_time_secs = 0.0;
-  _latest_cms_concurrent_precleaning_time_secs = 0.0;
-  _latest_cms_concurrent_sweeping_time_secs = 0.0;
-  _latest_cms_msc_end_to_msc_start_time_secs = 0.0;
-  _latest_cms_ms_end_to_ms_start = 0.0;
-  _latest_cms_remark_start_to_end_time_secs = 0.0;
-  _latest_cms_initial_mark_start_to_end_time_secs = 0.0;
-  _latest_cms_ms_marking_start_to_end_time_secs = 0.0;
-}
-
-void CMSAdaptiveSizePolicy::clear_generation_free_space_flags() {
-  AdaptiveSizePolicy::clear_generation_free_space_flags();
-
-  set_change_young_gen_for_maj_pauses(0);
-}
-
-void CMSAdaptiveSizePolicy::concurrent_phases_resume() {
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->stamp();
-    gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::concurrent_phases_resume()");
-  }
-  _concurrent_timer.start();
-}
-
-double CMSAdaptiveSizePolicy::time_since_major_gc() const {
-  _concurrent_timer.stop();
-  double time_since_cms_gc = _concurrent_timer.seconds();
-  _concurrent_timer.start();
-  _STW_timer.stop();
-  double time_since_STW_gc = _STW_timer.seconds();
-  _STW_timer.start();
-
-  return MIN2(time_since_cms_gc, time_since_STW_gc);
-}
-
-double CMSAdaptiveSizePolicy::major_gc_interval_average_for_decay() const {
-  double cms_interval = _avg_concurrent_interval->average();
-  double msc_interval = _avg_msc_interval->average();
-  double ms_interval = _avg_ms_interval->average();
-
-  return MAX3(cms_interval, msc_interval, ms_interval);
-}
-
-double CMSAdaptiveSizePolicy::cms_gc_cost() const {
-  return avg_major_gc_cost()->average();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_marking_begin() {
-  _STW_timer.stop();
-  // Start accumulating time for the marking in the STW timer.
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-void CMSAdaptiveSizePolicy::ms_collection_marking_end(
-    GCCause::Cause gc_cause) {
-  _STW_timer.stop();
-  if (gc_cause != GCCause::_java_lang_system_gc ||
-      UseAdaptiveSizePolicyWithSystemGC) {
-    _latest_cms_ms_marking_start_to_end_time_secs = _STW_timer.seconds();
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("CMSAdaptiveSizePolicy::"
-        "msc_collection_marking_end: mutator time %f",
-        _latest_cms_ms_marking_start_to_end_time_secs);
-    }
-  }
-  _STW_timer.reset();
-  _STW_timer.start();
-}
-
-double CMSAdaptiveSizePolicy::gc_cost() const {
-  double cms_gen_cost = cms_gc_cost();
-  double result =  MIN2(1.0, minor_gc_cost() + cms_gen_cost);
-  assert(result >= 0.0, "Both minor and major costs are non-negative");
-  return result;
-}
-
-// Cost of collection (unit-less)
-double CMSAdaptiveSizePolicy::collection_cost(double pause_in_seconds,
-                                              double interval_in_seconds) {
-  // Cost of collection (unit-less)
-  double cost = 0.0;
-  if ((interval_in_seconds > 0.0) &&
-      (pause_in_seconds > 0.0)) {
-    cost =
-      pause_in_seconds / interval_in_seconds;
-  }
-  return cost;
-}
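
In other words, collection_cost() is the unit-less duty cycle of a phase, with
a guard so a zero interval never divides. For example (made-up numbers):

    cost = pause / interval = 0.05 s / 2.5 s = 0.02    (0.0 if either input <= 0)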
-
-size_t CMSAdaptiveSizePolicy::adjust_eden_for_pause_time(size_t cur_eden) {
-  size_t change = 0;
-  size_t desired_eden = cur_eden;
-
-  // reduce eden size
-  change = eden_decrement_aligned_down(cur_eden);
-  desired_eden = cur_eden - change;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_eden_for_pause_time "
-      "adjusting eden for pause time. "
-      " starting eden size " SIZE_FORMAT
-      " reduced eden size " SIZE_FORMAT
-      " eden delta " SIZE_FORMAT,
-      cur_eden, desired_eden, change);
-  }
-
-  return desired_eden;
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_eden_for_throughput(size_t cur_eden) {
-
-  size_t desired_eden = cur_eden;
-
-  set_change_young_gen_for_throughput(increase_young_gen_for_througput_true);
-
-  size_t change = eden_increment_aligned_up(cur_eden);
-  size_t scaled_change = scale_by_gen_gc_cost(change, minor_gc_cost());
-
-  if (cur_eden + scaled_change > cur_eden) {
-    desired_eden = cur_eden + scaled_change;
-  }
-
-  _young_gen_change_for_minor_throughput++;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_eden_for_throughput "
-      "adjusting eden for throughput. "
-      " starting eden size " SIZE_FORMAT
-      " increased eden size " SIZE_FORMAT
-      " eden delta " SIZE_FORMAT,
-      cur_eden, desired_eden, scaled_change);
-  }
-
-  return desired_eden;
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_eden_for_footprint(size_t cur_eden) {
-
-  set_decrease_for_footprint(decrease_young_gen_for_footprint_true);
-
-  size_t change = eden_decrement(cur_eden);
-  size_t desired_eden_size = cur_eden - change;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_eden_for_footprint "
-      "adjusting eden for footprint. "
-      " starting eden size " SIZE_FORMAT
-      " reduced eden size " SIZE_FORMAT
-      " eden delta " SIZE_FORMAT,
-      cur_eden, desired_eden_size, change);
-  }
-  return desired_eden_size;
-}
-
-// The eden and promo versions should be combined if possible.
-// They are the same except that the sizes of the decrement
-// and increment are different for eden and promo.
-size_t CMSAdaptiveSizePolicy::eden_decrement_aligned_down(size_t cur_eden) {
-  size_t delta = eden_decrement(cur_eden);
-  return align_size_down(delta, generation_alignment());
-}
-
-size_t CMSAdaptiveSizePolicy::eden_increment_aligned_up(size_t cur_eden) {
-  size_t delta = eden_increment(cur_eden);
-  return align_size_up(delta, generation_alignment());
-}
-
-size_t CMSAdaptiveSizePolicy::promo_decrement_aligned_down(size_t cur_promo) {
-  size_t delta = promo_decrement(cur_promo);
-  return align_size_down(delta, generation_alignment());
-}
-
-size_t CMSAdaptiveSizePolicy::promo_increment_aligned_up(size_t cur_promo) {
-  size_t delta = promo_increment(cur_promo);
-  return align_size_up(delta, generation_alignment());
-}
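
Rounding decrements down and increments up keeps every adjustment a whole
number of alignment units. With a hypothetical 64 KB allocation granularity
(the actual value is platform-dependent):

    delta = 150 KB
    align_size_down(150 KB, 64 KB) = 128 KB   // shrink by a little less
    align_size_up(150 KB, 64 KB)   = 192 KB   // grow by a little more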
-
-
-void CMSAdaptiveSizePolicy::compute_eden_space_size(size_t cur_eden,
-                                                    size_t max_eden_size)
-{
-  size_t desired_eden_size = cur_eden;
-  size_t eden_limit = max_eden_size;
-
-  // Printout input
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_eden_space_size: "
-      "cur_eden " SIZE_FORMAT,
-      cur_eden);
-  }
-
-  // Used for diagnostics
-  clear_generation_free_space_flags();
-
-  if (_avg_minor_pause->padded_average() > gc_pause_goal_sec()) {
-    if (minor_pause_young_estimator()->decrement_will_decrease()) {
-      // If the minor pause is too long, shrink the young gen.
-      set_change_young_gen_for_min_pauses(
-        decrease_young_gen_for_min_pauses_true);
-      desired_eden_size = adjust_eden_for_pause_time(desired_eden_size);
-    }
-  } else if ((avg_remark_pause()->padded_average() > gc_pause_goal_sec()) ||
-             (avg_initial_pause()->padded_average() > gc_pause_goal_sec())) {
-    // The remark or initial pauses are not meeting the goal.  Should
-    // the generation be shrunk?
-    if (get_and_clear_first_after_collection() &&
-        ((avg_remark_pause()->padded_average() > gc_pause_goal_sec() &&
-          remark_pause_young_estimator()->decrement_will_decrease()) ||
-         (avg_initial_pause()->padded_average() > gc_pause_goal_sec() &&
-          initial_pause_young_estimator()->decrement_will_decrease()))) {
-
-       set_change_young_gen_for_maj_pauses(
-         decrease_young_gen_for_maj_pauses_true);
-
-      // If the remark or initial pause is too long and this is the
-      // first young gen collection after a cms collection, shrink
-      // the young gen.
-      desired_eden_size = adjust_eden_for_pause_time(desired_eden_size);
-    }
-    // If not the first young gen collection after a cms collection,
-    // don't do anything.  In this case an adjustment has already
-    // been made and the results of the adjustment have not yet been
-    // measured.
-  } else if ((minor_gc_cost() >= 0.0) &&
-             (adjusted_mutator_cost() < _throughput_goal)) {
-    desired_eden_size = adjust_eden_for_throughput(desired_eden_size);
-  } else {
-    desired_eden_size = adjust_eden_for_footprint(desired_eden_size);
-  }
-
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_eden_space_size limits:"
-      " desired_eden_size: " SIZE_FORMAT
-      " old_eden_size: " SIZE_FORMAT,
-      desired_eden_size, cur_eden);
-  }
-
-  set_eden_size(desired_eden_size);
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_promo_for_pause_time(size_t cur_promo) {
-  size_t change = 0;
-  size_t desired_promo = cur_promo;
-  // Move this test up to caller like the adjust_eden_for_pause_time()
-  // call.
-  if ((AdaptiveSizePausePolicy == 0) &&
-      ((avg_remark_pause()->padded_average() > gc_pause_goal_sec()) ||
-      (avg_initial_pause()->padded_average() > gc_pause_goal_sec()))) {
-    set_change_old_gen_for_maj_pauses(decrease_old_gen_for_maj_pauses_true);
-    change = promo_decrement_aligned_down(cur_promo);
-    desired_promo = cur_promo - change;
-  } else if ((AdaptiveSizePausePolicy > 0) &&
-      (((avg_remark_pause()->padded_average() > gc_pause_goal_sec()) &&
-       remark_pause_old_estimator()->decrement_will_decrease()) ||
-      ((avg_initial_pause()->padded_average() > gc_pause_goal_sec()) &&
-       initial_pause_old_estimator()->decrement_will_decrease()))) {
-    set_change_old_gen_for_maj_pauses(decrease_old_gen_for_maj_pauses_true);
-    change = promo_decrement_aligned_down(cur_promo);
-    desired_promo = cur_promo - change;
-  }
-
-  if ((change != 0) && PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_promo_for_pause_time "
-      "adjusting promo for pause time. "
-      " starting promo size " SIZE_FORMAT
-      " reduced promo size " SIZE_FORMAT
-      " promo delta " SIZE_FORMAT,
-      cur_promo, desired_promo, change);
-  }
-
-  return desired_promo;
-}
-
-// Try to share this with PS.
-size_t CMSAdaptiveSizePolicy::scale_by_gen_gc_cost(size_t base_change,
-                                                  double gen_gc_cost) {
-
-  // Calculate the change to use for the tenured gen.
-  size_t scaled_change = 0;
-  // Can the increment to the generation be scaled?
-  if (gc_cost() >= 0.0 && gen_gc_cost >= 0.0) {
-    double scale_by_ratio = gen_gc_cost / gc_cost();
-    scaled_change =
-      (size_t) (scale_by_ratio * (double) base_change);
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(
-        "Scaled tenured increment: " SIZE_FORMAT " by %f down to "
-          SIZE_FORMAT,
-        base_change, scale_by_ratio, scaled_change);
-    }
-  } else if (gen_gc_cost >= 0.0) {
-    // Scaling is not going to work.  If the major gc time is
-    // larger than the other GC costs, give it a full increment.
-    if (gen_gc_cost >= (gc_cost() - gen_gc_cost)) {
-      scaled_change = base_change;
-    }
-  } else {
-    // Don't expect to get here but it's ok if it does
-    // in the product build since the delta will be 0
-    // and nothing will change.
-    assert(false, "Unexpected value for gc costs");
-  }
-
-  return scaled_change;
-}
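
A worked example of the scaling (made-up numbers): if the total GC duty cycle
is 0.10 and this generation accounts for 0.03 of it, an 8 MB base change is
scaled down to that generation's share of the cost:

    scale_by_ratio = gen_gc_cost / gc_cost() = 0.03 / 0.10 = 0.3
    scaled_change  = 0.3 * 8 MB = 2.4 MB     (truncated to bytes as a size_t)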
-
-size_t CMSAdaptiveSizePolicy::adjust_promo_for_throughput(size_t cur_promo) {
-
-  size_t desired_promo = cur_promo;
-
-  set_change_old_gen_for_throughput(increase_old_gen_for_throughput_true);
-
-  size_t change = promo_increment_aligned_up(cur_promo);
-  size_t scaled_change = scale_by_gen_gc_cost(change, major_gc_cost());
-
-  if (cur_promo + scaled_change > cur_promo) {
-    desired_promo = cur_promo + scaled_change;
-  }
-
-  _old_gen_change_for_major_throughput++;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_promo_for_throughput "
-      "adjusting promo for throughput. "
-      " starting promo size " SIZE_FORMAT
-      " increased promo size " SIZE_FORMAT
-      " promo delta " SIZE_FORMAT,
-      cur_promo, desired_promo, scaled_change);
-  }
-
-  return desired_promo;
-}
-
-size_t CMSAdaptiveSizePolicy::adjust_promo_for_footprint(size_t cur_promo,
-                                                         size_t cur_eden) {
-
-  set_decrease_for_footprint(decrease_young_gen_for_footprint_true);
-
-  size_t change = promo_decrement(cur_promo);
-  size_t desired_promo_size = cur_promo - change;
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::adjust_promo_for_footprint "
-      "adjusting promo for footprint. "
-      " starting promo size " SIZE_FORMAT
-      " reduced promo size " SIZE_FORMAT
-      " promo delta " SIZE_FORMAT,
-      cur_promo, desired_promo_size, change);
-  }
-  return desired_promo_size;
-}
-
-void CMSAdaptiveSizePolicy::compute_tenured_generation_free_space(
-                                size_t cur_tenured_free,
-                                size_t max_tenured_available,
-                                size_t cur_eden) {
-  // This can be bad if the desired value grows/shrinks without
-  // any connection to the real free space
-  size_t desired_promo_size = promo_size();
-  size_t tenured_limit = max_tenured_available;
-
-  // Printout input
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_tenured_generation_free_space: "
-      "cur_tenured_free " SIZE_FORMAT
-      " max_tenured_available " SIZE_FORMAT,
-      cur_tenured_free, max_tenured_available);
-  }
-
-  // Used for diagnostics
-  clear_generation_free_space_flags();
-
-  set_decide_at_full_gc(decide_at_full_gc_true);
-  if (avg_remark_pause()->padded_average() > gc_pause_goal_sec() ||
-      avg_initial_pause()->padded_average() > gc_pause_goal_sec()) {
-    desired_promo_size = adjust_promo_for_pause_time(cur_tenured_free);
-  } else if (avg_minor_pause()->padded_average() > gc_pause_goal_sec()) {
-    // Nothing to do since the minor collections are too large and
-    // this method only deals with the cms generation.
-  } else if ((cms_gc_cost() >= 0.0) &&
-             (adjusted_mutator_cost() < _throughput_goal)) {
-    desired_promo_size = adjust_promo_for_throughput(cur_tenured_free);
-  } else {
-    desired_promo_size = adjust_promo_for_footprint(cur_tenured_free,
-                                                    cur_eden);
-  }
-
-  if (PrintGC && PrintAdaptiveSizePolicy) {
-    gclog_or_tty->print_cr(
-      "CMSAdaptiveSizePolicy::compute_tenured_generation_free_space limits:"
-      " desired_promo_size: " SIZE_FORMAT
-      " old_promo_size: " SIZE_FORMAT,
-      desired_promo_size, cur_tenured_free);
-  }
-
-  set_promo_size(desired_promo_size);
-}
-
-uint CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
-                                             bool is_survivor_overflow,
-                                             uint tenuring_threshold,
-                                             size_t survivor_limit) {
-  assert(survivor_limit >= generation_alignment(),
-         "survivor_limit too small");
-  assert((size_t)align_size_down(survivor_limit, generation_alignment())
-         == survivor_limit, "survivor_limit not aligned");
-
-  // Change UsePSAdaptiveSurvivorSizePolicy -> UseAdaptiveSurvivorSizePolicy?
-  if (!UsePSAdaptiveSurvivorSizePolicy ||
-      !young_gen_policy_is_ready()) {
-    return tenuring_threshold;
-  }
-
-  // We'll decide whether to increase or decrease the tenuring
-  // threshold based partly on the newly computed survivor size
-  // (if we hit the maximum limit allowed, we'll always choose to
-  // decrement the threshold).
-  bool incr_tenuring_threshold = false;
-  bool decr_tenuring_threshold = false;
-
-  set_decrement_tenuring_threshold_for_gc_cost(false);
-  set_increment_tenuring_threshold_for_gc_cost(false);
-  set_decrement_tenuring_threshold_for_survivor_limit(false);
-
-  if (!is_survivor_overflow) {
-    // Keep running averages on how much survived
-
-    // We use the tenuring threshold to equalize the cost of major
-    // and minor collections.
-    // ThresholdTolerance is used to indicate how sensitive the
-    // tenuring threshold is to differences in cost between the
-    // collection types.
-
-    // Get the times of interest. This involves a little work, so
-    // we cache the values here.
-    const double major_cost = major_gc_cost();
-    const double minor_cost = minor_gc_cost();
-
-    if (minor_cost > major_cost * _threshold_tolerance_percent) {
-      // Minor times are getting too long;  lower the threshold so
-      // less survives and more is promoted.
-      decr_tenuring_threshold = true;
-      set_decrement_tenuring_threshold_for_gc_cost(true);
-    } else if (major_cost > minor_cost * _threshold_tolerance_percent) {
-      // Major times are too long, so we want less promotion.
-      incr_tenuring_threshold = true;
-      set_increment_tenuring_threshold_for_gc_cost(true);
-    }
-
-  } else {
-    // Survivor space overflow occurred, so promoted and survived are
-    // not accurate. We'll make our best guess by combining survived
-    // and promoted and counting them as survivors.
-    //
-    // We'll lower the tenuring threshold to see if we can correct
-    // things. Also, set the survivor size conservatively. We're
-    // trying to avoid many overflows from occurring if defnew size
-    // is just too small.
-
-    decr_tenuring_threshold = true;
-  }
-
-  // The padded average also maintains a deviation from the average;
-  // we use this to see how good an estimate we have of what survived.
-  // We're trying to pad the survivor size as little as possible without
-  // overflowing the survivor spaces.
-  size_t target_size = align_size_up((size_t)_avg_survived->padded_average(),
-                                     generation_alignment());
-  target_size = MAX2(target_size, generation_alignment());
-
-  if (target_size > survivor_limit) {
-    // Target size is bigger than we can handle. Let's also reduce
-    // the tenuring threshold.
-    target_size = survivor_limit;
-    decr_tenuring_threshold = true;
-    set_decrement_tenuring_threshold_for_survivor_limit(true);
-  }
-
-  // Finally, increment or decrement the tenuring threshold, as decided above.
-  // We test for decrementing first, as we might have hit the target size
-  // limit.
-  if (decr_tenuring_threshold && !(AlwaysTenure || NeverTenure)) {
-    if (tenuring_threshold > 1) {
-      tenuring_threshold--;
-    }
-  } else if (incr_tenuring_threshold && !(AlwaysTenure || NeverTenure)) {
-    if (tenuring_threshold < MaxTenuringThreshold) {
-      tenuring_threshold++;
-    }
-  }
-
-  // We keep a running average of the amount promoted which is used
-  // to decide when we should collect the old generation (when
-  // the amount of old gen free space is less than what we expect to
-  // promote).
-
-  if (PrintAdaptiveSizePolicy) {
-    // A little more detail if Verbose is on
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    if (Verbose) {
-      gclog_or_tty->print( "  avg_survived: %f"
-                  "  avg_deviation: %f",
-                  _avg_survived->average(),
-                  _avg_survived->deviation());
-    }
-
-    gclog_or_tty->print( "  avg_survived_padded_avg: %f",
-                _avg_survived->padded_average());
-
-    if (Verbose) {
-      gclog_or_tty->print( "  avg_promoted_avg: %f"
-                  "  avg_promoted_dev: %f",
-                  gch->gc_stats(1)->avg_promoted()->average(),
-                  gch->gc_stats(1)->avg_promoted()->deviation());
-    }
-
-    gclog_or_tty->print( "  avg_promoted_padded_avg: %f"
-                "  avg_pretenured_padded_avg: %f"
-                "  tenuring_thresh: %u"
-                "  target_size: " SIZE_FORMAT
-                "  survivor_limit: " SIZE_FORMAT,
-                gch->gc_stats(1)->avg_promoted()->padded_average(),
-                _avg_pretenured->padded_average(),
-                tenuring_threshold, target_size, survivor_limit);
-    gclog_or_tty->cr();
-  }
-
-  set_survivor_size(target_size);
-
-  return tenuring_threshold;
-}
-
-bool CMSAdaptiveSizePolicy::get_and_clear_first_after_collection() {
-  bool result = _first_after_collection;
-  _first_after_collection = false;
-  return result;
-}
-
-bool CMSAdaptiveSizePolicy::print_adaptive_size_policy_on(
-                                                    outputStream* st) const {
-
-  if (!UseAdaptiveSizePolicy) {
-    return false;
-  }
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  Generation* young = gch->get_gen(0);
-  DefNewGeneration* def_new = young->as_DefNewGeneration();
-  return AdaptiveSizePolicy::print_adaptive_size_policy_on(
-                                         st,
-                                         def_new->tenuring_threshold());
-}
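
The survivor-size / tenuring-threshold logic deleted above reduces to a short decision procedure. Below is a minimal, self-contained C++ sketch of that procedure; plain types stand in for the VM's AdaptivePaddedAverage, and the padded average is assumed to be average + padding * deviation, per the comments in the deleted code.

#include <algorithm>
#include <cstddef>

// Stand-in for AdaptivePaddedAverage: a running average, its deviation,
// and a pad factor used to oversize conservatively.
struct SurvivorStats {
  double average;    // running average of bytes survived
  double deviation;  // deviation of that average
  double padding;    // pad factor
  double padded_average() const { return average + padding * deviation; }
};

// Round v up to a multiple of alignment (alignment > 0).
inline size_t align_up(size_t v, size_t alignment) {
  return (v + alignment - 1) / alignment * alignment;
}

// Returns the new tenuring threshold and writes the new survivor size.
unsigned adjust_tenuring(bool survivor_overflow,
                         double minor_cost, double major_cost,
                         double tolerance,          // threshold tolerance factor
                         unsigned threshold, unsigned max_threshold,
                         const SurvivorStats& survived,
                         size_t alignment, size_t survivor_limit,
                         size_t* target_size_out) {
  bool incr = false, decr = false;
  if (!survivor_overflow) {
    // Balance minor vs. major cost: promote more when minors dominate,
    // promote less when majors dominate.
    if (minor_cost > major_cost * tolerance) {
      decr = true;
    } else if (major_cost > minor_cost * tolerance) {
      incr = true;
    }
  } else {
    decr = true;  // overflow: survived/promoted stats are unreliable
  }

  // Pad the survivor size as little as possible without overflowing.
  size_t target = std::max(align_up((size_t)survived.padded_average(), alignment),
                           alignment);
  if (target > survivor_limit) {  // hit the limit: also lower the threshold
    target = survivor_limit;
    decr = true;
  }

  if (decr && threshold > 1) {
    threshold--;
  } else if (incr && threshold < max_threshold) {
    threshold++;
  }
  *target_size_out = target;
  return threshold;
}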
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,477 +0,0 @@
-/*
- * Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
-
-#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
-#include "runtime/timer.hpp"
-
-// This class keeps statistical information and computes the
-// size of the heap for the concurrent mark sweep collector.
-//
-// Costs for the garbage collector include costs for
-//   minor collection
-//   concurrent collection
-//      stop-the-world component
-//      concurrent component
-//   major compacting collection
-//      uses decaying cost
-
-// Forward decls
-class elapsedTimer;
-
-class CMSAdaptiveSizePolicy : public AdaptiveSizePolicy {
- friend class CMSGCAdaptivePolicyCounters;
- friend class CMSCollector;
- private:
-
-  // Total number of processors available
-  int _processor_count;
-  // Number of processors used by the concurrent phases of GC
-  // This number is assumed to be the same for all concurrent
-  // phases.
-  int _concurrent_processor_count;
-
-  // Time that the mutators run exclusive of a particular
-  // phase.  For example, the time the mutators run excluding
-  // the time during which the cms collector runs concurrently
-  // with the mutators.
-  //   Between end of most recent cms reset and start of initial mark
-  //   This may be redundant
-  double _latest_cms_reset_end_to_initial_mark_start_secs;
-  //   Between end of the most recent initial mark and start of remark
-  double _latest_cms_initial_mark_end_to_remark_start_secs;
-  //   Between end of most recent collection and start of
-  //   a concurrent collection
-  double _latest_cms_collection_end_to_collection_start_secs;
-  //   Times of the concurrent phases of the most recent
-  //   concurrent collection
-  double _latest_cms_concurrent_marking_time_secs;
-  double _latest_cms_concurrent_precleaning_time_secs;
-  double _latest_cms_concurrent_sweeping_time_secs;
-  //   Between end of most recent STW MSC and start of next STW MSC
-  double _latest_cms_msc_end_to_msc_start_time_secs;
-  //   Between end of most recent MS and start of next MS
-  //   This does not include any time spent during a concurrent
-  // collection.
-  double _latest_cms_ms_end_to_ms_start;
-  //   Between start and end of the initial mark of the most recent
-  // concurrent collection.
-  double _latest_cms_initial_mark_start_to_end_time_secs;
-  //   Between start and end of the remark phase of the most recent
-  // concurrent collection
-  double _latest_cms_remark_start_to_end_time_secs;
-  //   Between start and end of the most recent MS STW marking phase
-  double _latest_cms_ms_marking_start_to_end_time_secs;
-
-  // Pause time timers
-  static elapsedTimer _STW_timer;
-  // Concurrent collection timer.  Used for total of all concurrent phases
-  // during 1 collection cycle.
-  static elapsedTimer _concurrent_timer;
-
-  // When the size of the generation is changed, the size
-  // of the change will be rounded up or down (depending on the
-  // type of change) by this value.
-  size_t _generation_alignment;
-
-  // If this variable is true, the size of the young generation
-  // may be changed to reduce the pause(s) of collecting the
-  // tenured generation, in order to meet the pause time goal.
-  // It is common to change the size of the tenured generation
-  // itself to meet its pause time goal.  With the CMS collector
-  // for the tenured generation, the size of the young generation
-  // can have a significant effect on the pause times for
-  // collecting the tenured generation.
-  // This is a duplicate of a variable in PSAdaptiveSizePolicy.  It
-  // is duplicated because it is not clear that it is general enough
-  // to go into AdaptiveSizePolicy.
-  int _change_young_gen_for_maj_pauses;
-
-  // Variable that is set to true after a collection.
-  bool _first_after_collection;
-
-  // Fraction of collections that are of each type
-  double concurrent_fraction() const;
-  double STW_msc_fraction() const;
-  double STW_ms_fraction() const;
-
-  // This call cannot be put into the epilogue as long as some
-  // of the counters can be set during concurrent phases.
-  virtual void clear_generation_free_space_flags();
-
-  void set_first_after_collection() { _first_after_collection = true; }
-
- protected:
-  // Average of the sum of the concurrent times for
-  // one collection in seconds.
-  AdaptiveWeightedAverage* _avg_concurrent_time;
-  // Average time between concurrent collections in seconds.
-  AdaptiveWeightedAverage* _avg_concurrent_interval;
-  // Average cost of the concurrent part of a collection
-  // in seconds.
-  AdaptiveWeightedAverage* _avg_concurrent_gc_cost;
-
-  // Average of the initial pause of a concurrent collection in seconds.
-  AdaptivePaddedAverage* _avg_initial_pause;
-  // Average of the remark pause of a concurrent collection in seconds.
-  AdaptivePaddedAverage* _avg_remark_pause;
-
-  // Average of the stop-the-world (STW) (initial mark + remark)
-  // times in seconds for concurrent collections.
-  AdaptiveWeightedAverage* _avg_cms_STW_time;
-  // Average of the STW collection cost for concurrent collections.
-  AdaptiveWeightedAverage* _avg_cms_STW_gc_cost;
-
-  // Average of the bytes free at the start of the sweep.
-  AdaptiveWeightedAverage* _avg_cms_free_at_sweep;
-  // Average of the bytes free at the end of the collection.
-  AdaptiveWeightedAverage* _avg_cms_free;
-  // Average of the bytes promoted between cms collections.
-  AdaptiveWeightedAverage* _avg_cms_promo;
-
-  // stop-the-world (STW) mark-sweep-compact
-  // Average of the pause time in seconds for STW mark-sweep-compact
-  // collections.
-  AdaptiveWeightedAverage* _avg_msc_pause;
-  // Average of the interval in seconds between STW mark-sweep-compact
-  // collections.
-  AdaptiveWeightedAverage* _avg_msc_interval;
-  // Average of the collection costs for STW mark-sweep-compact
-  // collections.
-  AdaptiveWeightedAverage* _avg_msc_gc_cost;
-
-  // Averages for mark-sweep collections.
-  // The collection may have started as a background collection
-  // that completes in a stop-the-world (STW) collection.
-  // Average of the pause time in seconds for mark-sweep
-  // collections.
-  AdaptiveWeightedAverage* _avg_ms_pause;
-  // Average of the interval in seconds between mark-sweep
-  // collections.
-  AdaptiveWeightedAverage* _avg_ms_interval;
-  // Average of the collection costs for mark-sweep
-  // collections.
-  AdaptiveWeightedAverage* _avg_ms_gc_cost;
-
-  // These variables contain a linear fit of
-  // a generation size as the independent variable
-  // and a pause time as the dependent variable.
-  // For example _remark_pause_old_estimator
-  // is a fit of the old generation size as the
-  // independent variable and the remark pause
-  // as the dependent variable.
-  //   remark pause time vs. cms gen size
-  LinearLeastSquareFit* _remark_pause_old_estimator;
-  //   initial pause time vs. cms gen size
-  LinearLeastSquareFit* _initial_pause_old_estimator;
-  //   remark pause time vs. young gen size
-  LinearLeastSquareFit* _remark_pause_young_estimator;
-  //   initial pause time vs. young gen size
-  LinearLeastSquareFit* _initial_pause_young_estimator;
-
-  // Accessors
-  int processor_count() const { return _processor_count; }
-  int concurrent_processor_count() const { return _concurrent_processor_count; }
-
-  AdaptiveWeightedAverage* avg_concurrent_time() const {
-    return _avg_concurrent_time;
-  }
-
-  AdaptiveWeightedAverage* avg_concurrent_interval() const {
-    return _avg_concurrent_interval;
-  }
-
-  AdaptiveWeightedAverage* avg_concurrent_gc_cost() const {
-    return _avg_concurrent_gc_cost;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_STW_time() const {
-    return _avg_cms_STW_time;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_STW_gc_cost() const {
-    return _avg_cms_STW_gc_cost;
-  }
-
-  AdaptivePaddedAverage* avg_initial_pause() const {
-    return _avg_initial_pause;
-  }
-
-  AdaptivePaddedAverage* avg_remark_pause() const {
-    return _avg_remark_pause;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_free() const {
-    return _avg_cms_free;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_free_at_sweep() const {
-    return _avg_cms_free_at_sweep;
-  }
-
-  AdaptiveWeightedAverage* avg_msc_pause() const {
-    return _avg_msc_pause;
-  }
-
-  AdaptiveWeightedAverage* avg_msc_interval() const {
-    return _avg_msc_interval;
-  }
-
-  AdaptiveWeightedAverage* avg_msc_gc_cost() const {
-    return _avg_msc_gc_cost;
-  }
-
-  AdaptiveWeightedAverage* avg_ms_pause() const {
-    return _avg_ms_pause;
-  }
-
-  AdaptiveWeightedAverage* avg_ms_interval() const {
-    return _avg_ms_interval;
-  }
-
-  AdaptiveWeightedAverage* avg_ms_gc_cost() const {
-    return _avg_ms_gc_cost;
-  }
-
-  LinearLeastSquareFit* remark_pause_old_estimator() {
-    return _remark_pause_old_estimator;
-  }
-  LinearLeastSquareFit* initial_pause_old_estimator() {
-    return _initial_pause_old_estimator;
-  }
-  LinearLeastSquareFit* remark_pause_young_estimator() {
-    return _remark_pause_young_estimator;
-  }
-  LinearLeastSquareFit* initial_pause_young_estimator() {
-    return _initial_pause_young_estimator;
-  }
-
-  // These *slope() methods return the slope
-  // m for the linear fit of an independent
-  // variable vs. a dependent variable.  For
-  // example
-  //  remark_pause = m * old_generation_size + c
-  // These may be used to determine if an
-  // adjustment should be made to achieve a goal.
-  // For example, if remark_pause_old_slope() is
-  // positive, a reduction of the old generation
-  // size has on average resulted in the reduction
-  // of the remark pause.
-  float remark_pause_old_slope() {
-    return _remark_pause_old_estimator->slope();
-  }
-
-  float initial_pause_old_slope() {
-    return _initial_pause_old_estimator->slope();
-  }
-
-  float remark_pause_young_slope() {
-    return _remark_pause_young_estimator->slope();
-  }
-
-  float initial_pause_young_slope() {
-    return _initial_pause_young_estimator->slope();
-  }
-
-  // Update estimators
-  void update_minor_pause_old_estimator(double minor_pause_in_ms);
-
-  // Fraction of processors used by the concurrent phases.
-  double concurrent_processor_fraction();
-
-  // Returns the total times for the concurrent part of the
-  // latest collection in seconds.
-  double concurrent_collection_time();
-
-  // Returns the total times for the concurrent part of the
-  // latest collection in seconds where the times of the various
-  // concurrent phases are scaled by the processor fraction used
-  // during the phase.
-  double scaled_concurrent_collection_time();
-
-  // Dimensionless concurrent GC cost for all the concurrent phases.
-  double concurrent_collection_cost(double interval_in_seconds);
-
-  // Dimensionless GC cost
-  double collection_cost(double pause_in_seconds, double interval_in_seconds);
-
-  virtual GCPolicyKind kind() const { return _gc_cms_adaptive_size_policy; }
-
-  virtual double time_since_major_gc() const;
-
-  // This returns the maximum average for the concurrent, ms, and
-  // msc collections.  This is meant to be used for the calculation
-  // of the decayed major gc cost and is not in general the
-  // average of all the different types of major collections.
-  virtual double major_gc_interval_average_for_decay() const;
-
- public:
-  CMSAdaptiveSizePolicy(size_t init_eden_size,
-                        size_t init_promo_size,
-                        size_t init_survivor_size,
-                        double max_gc_minor_pause_sec,
-                        double max_gc_pause_sec,
-                        uint gc_cost_ratio);
-
-  // The timers for the stop-the-world phases measure a total
-  // stop-the-world time.  The timer is started and stopped
-  // for each phase but is only reset after the final checkpoint.
-  void checkpoint_roots_initial_begin();
-  void checkpoint_roots_initial_end(GCCause::Cause gc_cause);
-  void checkpoint_roots_final_begin();
-  void checkpoint_roots_final_end(GCCause::Cause gc_cause);
-
-  // Methods for gathering information about the
-  // concurrent marking phase of the collection.
-  // Records the mutator times and
-  // resets the concurrent timer.
-  void concurrent_marking_begin();
-  // Resets concurrent phase timer in the begin methods and
-  // saves the time for a phase in the end methods.
-  void concurrent_marking_end();
-  void concurrent_sweeping_begin();
-  void concurrent_sweeping_end();
-  // Similar to the above (e.g., concurrent_marking_end()) and
-  // is used for both the precleaning and abortable precleaning
-  // phases.
-  void concurrent_precleaning_begin();
-  void concurrent_precleaning_end();
-  // Stops the concurrent phases time.  Gathers
-  // information and resets the timer.
-  void concurrent_phases_end(GCCause::Cause gc_cause,
-                              size_t cur_eden,
-                              size_t cur_promo);
-
-  // Methods for gathering information about STW Mark-Sweep-Compact
-  void msc_collection_begin();
-  void msc_collection_end(GCCause::Cause gc_cause);
-
-  // Methods for gathering information about Mark-Sweep done
-  // in the foreground.
-  void ms_collection_begin();
-  void ms_collection_end(GCCause::Cause gc_cause);
-
-  // Cost for a mark-sweep tenured gen collection done in the foreground
-  double ms_gc_cost() const {
-    return MAX2(0.0F, _avg_ms_gc_cost->average());
-  }
-
-  // Cost of collecting the tenured generation.  Includes
-  // concurrent collection and STW collection costs
-  double cms_gc_cost() const;
-
-  // Cost of STW mark-sweep-compact tenured gen collection.
-  double msc_gc_cost() const {
-    return MAX2(0.0F, _avg_msc_gc_cost->average());
-  }
-
-  // Cost of a compacting collection: minor cost plus STW MSC cost.
-  double compacting_gc_cost() const {
-    double result = MIN2(1.0, minor_gc_cost() + msc_gc_cost());
-    assert(result >= 0.0, "Both minor and major costs are non-negative");
-    return result;
-  }
-
-  // Restarts the concurrent phases timer.
-  void concurrent_phases_resume();
-
-  // Times the beginning and end of the marking phase of
-  // a synchronous MS collection.  An MS collection
-  // that finishes in the foreground can have started
-  // in the background.  These methods capture the
-  // completion of the marking (after the initial
-  // marking) that is done in the foreground.
-  void ms_collection_marking_begin();
-  void ms_collection_marking_end(GCCause::Cause gc_cause);
-
-  static elapsedTimer* concurrent_timer_ptr() {
-    return &_concurrent_timer;
-  }
-
-  AdaptiveWeightedAverage* avg_cms_promo() const {
-    return _avg_cms_promo;
-  }
-
-  int change_young_gen_for_maj_pauses() {
-    return _change_young_gen_for_maj_pauses;
-  }
-  void set_change_young_gen_for_maj_pauses(int v) {
-    _change_young_gen_for_maj_pauses = v;
-  }
-
-  void clear_internal_time_intervals();
-
-
-  // Either calculated_promo_size_in_bytes() or promo_size()
-  // should be deleted.
-  size_t promo_size() { return _promo_size; }
-  void set_promo_size(size_t v) { _promo_size = v; }
-
-  // Cost of GC for all types of collections.
-  virtual double gc_cost() const;
-
-  size_t generation_alignment() { return _generation_alignment; }
-
-  virtual void compute_eden_space_size(size_t cur_eden,
-                                       size_t max_eden_size);
-  // Calculates new survivor space size; returns a new tenuring threshold
-  // value. Stores new survivor size in _survivor_size.
-  virtual uint compute_survivor_space_size_and_threshold(
-                                                bool   is_survivor_overflow,
-                                                uint   tenuring_threshold,
-                                                size_t survivor_limit);
-
-  virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
-                                           size_t max_tenured_available,
-                                           size_t cur_eden);
-
-  size_t eden_decrement_aligned_down(size_t cur_eden);
-  size_t eden_increment_aligned_up(size_t cur_eden);
-
-  size_t adjust_eden_for_pause_time(size_t cur_eden);
-  size_t adjust_eden_for_throughput(size_t cur_eden);
-  size_t adjust_eden_for_footprint(size_t cur_eden);
-
-  size_t promo_decrement_aligned_down(size_t cur_promo);
-  size_t promo_increment_aligned_up(size_t cur_promo);
-
-  size_t adjust_promo_for_pause_time(size_t cur_promo);
-  size_t adjust_promo_for_throughput(size_t cur_promo);
-  size_t adjust_promo_for_footprint(size_t cur_promo, size_t cur_eden);
-
-  // Scale down the input size by the ratio of the cost to collect the
-  // generation to the total GC cost.
-  size_t scale_by_gen_gc_cost(size_t base_change, double gen_gc_cost);
-
-  // Return the value and clear it.
-  bool get_and_clear_first_after_collection();
-
-  // Printing support
-  virtual bool print_adaptive_size_policy_on(outputStream* st) const;
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSADAPTIVESIZEPOLICY_HPP
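
The *_slope() accessors in the header deleted above expose the coefficient m of a linear fit pause = m * size + c. A hedged sketch of maintaining such a slope from running sums follows; the VM's LinearLeastSquareFit additionally decays old samples, which is omitted here.

#include <cstddef>

// Incremental least-squares fit of y = m*x + c kept as running sums.
class LeastSquares {
  double n = 0, sx = 0, sy = 0, sxx = 0, sxy = 0;
 public:
  void sample(double x, double y) {
    n += 1; sx += x; sy += y; sxx += x * x; sxy += x * y;
  }
  // Slope m; with x = generation size and y = pause time, a positive
  // slope means smaller generations have, on average, meant shorter pauses.
  double slope() const {
    double denom = n * sxx - sx * sx;
    return denom != 0.0 ? (n * sxy - sx * sy) / denom : 0.0;
  }
};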
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -23,9 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/shared/gcPolicyCounters.hpp"
 #include "gc_implementation/shared/vmGCOperations.hpp"
@@ -57,25 +56,12 @@
   if (_generations == NULL)
     vm_exit_during_initialization("Unable to allocate gen spec");
 
-  if (UseParNewGC) {
-    if (UseAdaptiveSizePolicy) {
-      _generations[0] = new GenerationSpec(Generation::ASParNew,
-                                           _initial_young_size, _max_young_size);
-    } else {
-      _generations[0] = new GenerationSpec(Generation::ParNew,
-                                           _initial_young_size, _max_young_size);
-    }
-  } else {
-    _generations[0] = new GenerationSpec(Generation::DefNew,
-                                         _initial_young_size, _max_young_size);
-  }
-  if (UseAdaptiveSizePolicy) {
-    _generations[1] = new GenerationSpec(Generation::ASConcurrentMarkSweep,
-                                         _initial_old_size, _max_old_size);
-  } else {
-    _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
-                                         _initial_old_size, _max_old_size);
-  }
+  Generation::Name yg_name =
+    UseParNewGC ? Generation::ParNew : Generation::DefNew;
+  _generations[0] = new GenerationSpec(yg_name, _initial_young_size,
+                                       _max_young_size);
+  _generations[1] = new GenerationSpec(Generation::ConcurrentMarkSweep,
+                                       _initial_old_size, _max_old_size);
 
   if (_generations[0] == NULL || _generations[1] == NULL) {
     vm_exit_during_initialization("Unable to allocate gen spec");
@@ -85,14 +71,12 @@
 void ConcurrentMarkSweepPolicy::initialize_size_policy(size_t init_eden_size,
                                                size_t init_promo_size,
                                                size_t init_survivor_size) {
-  double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
-  _size_policy = new CMSAdaptiveSizePolicy(init_eden_size,
-                                           init_promo_size,
-                                           init_survivor_size,
-                                           max_gc_minor_pause_sec,
-                                           max_gc_pause_sec,
-                                           GCTimeRatio);
+  _size_policy = new AdaptiveSizePolicy(init_eden_size,
+                                        init_promo_size,
+                                        init_survivor_size,
+                                        max_gc_pause_sec,
+                                        GCTimeRatio);
 }
 
 void ConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
@@ -110,22 +94,3 @@
 {
   return CMSIncrementalMode;
 }
-
-
-//
-// ASConcurrentMarkSweepPolicy methods
-//
-
-void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {
-
-  assert(size_policy() != NULL, "A size policy is required");
-  // initialize the policy counters - 2 collectors, 3 generations
-  if (UseParNewGC) {
-    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
-      size_policy());
-  }
-  else {
-    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("Copy:CMS", 2, 3,
-      size_policy());
-  }
-}
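
After this change the size policy is configured from just MaxGCPauseMillis and GCTimeRatio. Below is a small sketch of how the two flags become the policy's goals, assuming AdaptiveSizePolicy's usual mutator-share definition of 1 - 1/(1 + GCTimeRatio); the names are illustrative.

// Derive the pause and throughput goals from the two remaining inputs.
struct PolicyGoals {
  double gc_pause_goal_sec;  // pause goal in seconds
  double throughput_goal;    // desired mutator fraction of total time
};

PolicyGoals make_goals(unsigned max_pause_ms, unsigned gc_time_ratio) {
  PolicyGoals g;
  g.gc_pause_goal_sec = max_pause_ms / 1000.0;
  // GC is allotted at most 1/(1 + ratio) of the time; the mutator the rest.
  g.throughput_goal = 1.0 - 1.0 / (1.0 + (double)gc_time_ratio);
  return g;  // e.g. gc_time_ratio = 99 yields a 0.99 mutator-time goal
}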
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -47,19 +47,4 @@
   virtual bool has_soft_ended_eden();
 };
 
-class ASConcurrentMarkSweepPolicy : public ConcurrentMarkSweepPolicy {
- public:
-
-  // Initialize the jstat counters.  This method requires a
-  // size policy.  The size policy is expected to be created
-  // after the generations are fully initialized so the
-  // initialization of the counters needs to be done after
-  // the initialization of the generations.
-  void initialize_gc_policy_counters();
-
-  virtual CollectorPolicy::Name kind() {
-    return CollectorPolicy::ASConcurrentMarkSweepPolicyKind;
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,303 +0,0 @@
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#include "memory/resourceArea.hpp"
-
-CMSGCAdaptivePolicyCounters::CMSGCAdaptivePolicyCounters(const char* name_arg,
-                                        int collectors,
-                                        int generations,
-                                        AdaptiveSizePolicy* size_policy_arg)
-        : GCAdaptivePolicyCounters(name_arg,
-                                   collectors,
-                                   generations,
-                                   size_policy_arg) {
-  if (UsePerfData) {
-    EXCEPTION_MARK;
-    ResourceMark rm;
-
-    const char* cname =
-      PerfDataManager::counter_name(name_space(), "cmsCapacity");
-    _cms_capacity_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Bytes, (jlong) OldSize, CHECK);
-#ifdef NOT_PRODUCT
-    cname =
-      PerfDataManager::counter_name(name_space(), "initialPause");
-    _initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_initial_pause()->last_sample(),
-      CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "remarkPause");
-    _remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_remark_pause()->last_sample(),
-      CHECK);
-#endif
-    cname =
-      PerfDataManager::counter_name(name_space(), "avgInitialPause");
-    _avg_initial_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_initial_pause()->average(),
-      CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgRemarkPause");
-    _avg_remark_pause_counter = PerfDataManager::create_variable(SUN_GC, cname,
-      PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_remark_pause()->average(),
-      CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgSTWGcCost");
-    _avg_cms_STW_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_cms_STW_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgSTWTime");
-    _avg_cms_STW_time_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-      (jlong) cms_size_policy()->avg_cms_STW_time()->average(),
-        CHECK);
-
-
-    cname = PerfDataManager::counter_name(name_space(), "avgConcurrentTime");
-    _avg_concurrent_time_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_concurrent_time()->average(),
-        CHECK);
-
-    cname =
-      PerfDataManager::counter_name(name_space(), "avgConcurrentInterval");
-    _avg_concurrent_interval_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_concurrent_interval()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgConcurrentGcCost");
-    _avg_concurrent_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_concurrent_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgCMSFreeAtSweep");
-    _avg_cms_free_at_sweep_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgCMSFree");
-    _avg_cms_free_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_cms_free()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgCMSPromo");
-    _avg_cms_promo_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_cms_promo()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMscPause");
-    _avg_msc_pause_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_msc_pause()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMscInterval");
-    _avg_msc_interval_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_msc_interval()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "mscGcCost");
-    _msc_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_msc_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMsPause");
-    _avg_ms_pause_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_ms_pause()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgMsInterval");
-    _avg_ms_interval_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_ms_interval()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "msGcCost");
-    _ms_gc_cost_counter = PerfDataManager::create_variable(SUN_GC,
-        cname,
-        PerfData::U_Ticks,
-        (jlong) cms_size_policy()->avg_ms_gc_cost()->average(),
-        CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "majorGcCost");
-    _major_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, cname,
-       PerfData::U_Ticks, (jlong) cms_size_policy()->cms_gc_cost(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
-    _promoted_avg_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
-    _promoted_avg_dev_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) 0 , CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
-    _promoted_padded_avg_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        cms_size_policy()->calculated_promo_size_in_bytes(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(),
-      "changeYoungGenForMajPauses");
-    _change_young_gen_for_maj_pauses_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events,
-        (jlong)0, CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "remarkPauseOldSlope");
-    _remark_pause_old_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->remark_pause_old_slope(), CHECK);
-
-    cname = PerfDataManager::counter_name(name_space(), "initialPauseOldSlope");
-    _initial_pause_old_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->initial_pause_old_slope(), CHECK);
-
-    cname =
-      PerfDataManager::counter_name(name_space(), "remarkPauseYoungSlope") ;
-    _remark_pause_young_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->remark_pause_young_slope(), CHECK);
-
-    cname =
-      PerfDataManager::counter_name(name_space(), "initialPauseYoungSlope");
-    _initial_pause_young_slope_counter =
-      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-        (jlong) cms_size_policy()->initial_pause_young_slope(), CHECK);
-
-
-  }
-  assert(size_policy()->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-}
-
-void CMSGCAdaptivePolicyCounters::update_counters() {
-  if (UsePerfData) {
-    GCAdaptivePolicyCounters::update_counters_from_policy();
-    update_counters_from_policy();
-  }
-}
-
-void CMSGCAdaptivePolicyCounters::update_counters(CMSGCStats* gc_stats) {
-  if (UsePerfData) {
-    update_counters();
-    update_promoted((size_t) gc_stats->avg_promoted()->last_sample());
-    update_avg_promoted_avg(gc_stats);
-    update_avg_promoted_dev(gc_stats);
-    update_avg_promoted_padded_avg(gc_stats);
-  }
-}
-
-void CMSGCAdaptivePolicyCounters::update_counters_from_policy() {
-  if (UsePerfData && (cms_size_policy() != NULL)) {
-
-    GCAdaptivePolicyCounters::update_counters_from_policy();
-
-    update_major_gc_cost_counter();
-    update_mutator_cost_counter();
-
-    update_eden_size();
-    update_promo_size();
-
-    // If these updates from the last_sample() work,
-    // revise the update methods for these counters
-    // (both here and in PS).
-    update_survived((size_t) cms_size_policy()->avg_survived()->last_sample());
-
-    update_avg_concurrent_time_counter();
-    update_avg_concurrent_interval_counter();
-    update_avg_concurrent_gc_cost_counter();
-#ifdef NOT_PRODUCT
-    update_initial_pause_counter();
-    update_remark_pause_counter();
-#endif
-    update_avg_initial_pause_counter();
-    update_avg_remark_pause_counter();
-
-    update_avg_cms_STW_time_counter();
-    update_avg_cms_STW_gc_cost_counter();
-
-    update_avg_cms_free_counter();
-    update_avg_cms_free_at_sweep_counter();
-    update_avg_cms_promo_counter();
-
-    update_avg_msc_pause_counter();
-    update_avg_msc_interval_counter();
-    update_msc_gc_cost_counter();
-
-    update_avg_ms_pause_counter();
-    update_avg_ms_interval_counter();
-    update_ms_gc_cost_counter();
-
-    update_avg_old_live_counter();
-
-    update_survivor_size_counters();
-    update_avg_survived_avg_counters();
-    update_avg_survived_dev_counters();
-
-    update_decrement_tenuring_threshold_for_gc_cost();
-    update_increment_tenuring_threshold_for_gc_cost();
-    update_decrement_tenuring_threshold_for_survivor_limit();
-
-    update_change_young_gen_for_maj_pauses();
-
-    update_major_collection_slope_counter();
-    update_remark_pause_old_slope_counter();
-    update_initial_pause_old_slope_counter();
-    update_remark_pause_young_slope_counter();
-    update_initial_pause_young_slope_counter();
-
-    update_decide_at_full_gc_counter();
-  }
-}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,308 +0,0 @@
-/*
- * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
-
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
-#include "gc_implementation/shared/gcStats.hpp"
-#include "runtime/perfData.hpp"
-
-// CMSGCAdaptivePolicyCounters is a holder class for performance counters
-// that track the data and decisions for the ergonomics policy for the
-// concurrent mark sweep collector.
-
-class CMSGCAdaptivePolicyCounters : public GCAdaptivePolicyCounters {
-  friend class VMStructs;
-
- private:
-
-  // Capacity of tenured generation recorded at the end of
-  // any collection.
-  PerfVariable* _cms_capacity_counter; // Make this common with PS _old_capacity
-
-  // Average stop-the-world pause time for both initial and
-  // remark pauses sampled at the end of the checkpointRootsFinalWork.
-  PerfVariable* _avg_cms_STW_time_counter;
-  // Average stop-the-world (STW) GC cost for the STW pause time
-  // _avg_cms_STW_time_counter.
-  PerfVariable* _avg_cms_STW_gc_cost_counter;
-
-#ifdef NOT_PRODUCT
-  // These are useful to see how the most recent values of these
-  // counters compare to their respective averages but
-  // do not control behavior.
-  PerfVariable* _initial_pause_counter;
-  PerfVariable* _remark_pause_counter;
-#endif
-
-  // Average of the initial marking pause for a concurrent collection.
-  PerfVariable* _avg_initial_pause_counter;
-  // Average of the remark pause for a concurrent collection.
-  PerfVariable* _avg_remark_pause_counter;
-
-  // Average for the sum of all the concurrent times per collection.
-  PerfVariable* _avg_concurrent_time_counter;
-  // Average for the time between the most recent end of a
-  // concurrent collection and the beginning of the next
-  // concurrent collection.
-  PerfVariable* _avg_concurrent_interval_counter;
-  // Average of the concurrent GC costs based on _avg_concurrent_time_counter
-  // and _avg_concurrent_interval_counter.
-  PerfVariable* _avg_concurrent_gc_cost_counter;
-
-  // Average of the free space in the tenured generation at the
-  // end of the sweep of the tenured generation.
-  PerfVariable* _avg_cms_free_counter;
-  // Average of the free space in the tenured generation at the
-  // start of the sweep of the tenured generation.
-  PerfVariable* _avg_cms_free_at_sweep_counter;
-  // Average of the free space in the tenured generation after
-  // any resizing of the tenured generation at the end of a
-  // collection of the tenured generation.
-  PerfVariable* _avg_cms_promo_counter;
-
-  // Average of the mark-sweep-compact (MSC) pause time for a collection
-  // of the tenured generation.
-  PerfVariable* _avg_msc_pause_counter;
-  // Average for the time between the most recent end of a
-  // MSC collection and the beginning of the next MSC collection.
-  PerfVariable* _avg_msc_interval_counter;
-  // Average for the GC cost of a MSC collection based on
-  // _avg_msc_pause_counter and _avg_msc_interval_counter.
-  PerfVariable* _msc_gc_cost_counter;
-
-  // Average of the mark-sweep (MS) pause time for a collection
-  // of the tenured generation.
-  PerfVariable* _avg_ms_pause_counter;
-  // Average for the time between the most recent end of a
-  // MS collection and the beginning of the next MS collection.
-  PerfVariable* _avg_ms_interval_counter;
-  // Average for the GC cost of a MS collection based on
-  // _avg_ms_pause_counter and _avg_ms_interval_counter.
-  PerfVariable* _ms_gc_cost_counter;
-
-  // Average of the bytes promoted per minor collection.
-  PerfVariable* _promoted_avg_counter;
-  // Average of the deviation of the promoted average.
-  PerfVariable* _promoted_avg_dev_counter;
-  // Padded average of the bytes promoted per minor collection.
-  PerfVariable* _promoted_padded_avg_counter;
-
-  // See the description of the _change_young_gen_for_maj_pauses
-  // variable in cmsAdaptiveSizePolicy.hpp.
-  PerfVariable* _change_young_gen_for_maj_pauses_counter;
-
-  // See the descriptions of the _remark_pause_old_slope,
-  // _initial_pause_old_slope, etc. variables in cmsAdaptiveSizePolicy.hpp.
-  PerfVariable* _remark_pause_old_slope_counter;
-  PerfVariable* _initial_pause_old_slope_counter;
-  PerfVariable* _remark_pause_young_slope_counter;
-  PerfVariable* _initial_pause_young_slope_counter;
-
-  CMSAdaptiveSizePolicy* cms_size_policy() {
-    assert(_size_policy->kind() ==
-      AdaptiveSizePolicy::_gc_cms_adaptive_size_policy,
-      "Wrong size policy");
-    return (CMSAdaptiveSizePolicy*)_size_policy;
-  }
-
-  inline void update_avg_cms_STW_time_counter() {
-    _avg_cms_STW_time_counter->set_value(
-      (jlong) (cms_size_policy()->avg_cms_STW_time()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_cms_STW_gc_cost_counter() {
-    _avg_cms_STW_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_cms_STW_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_avg_initial_pause_counter() {
-    _avg_initial_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
-      (double) MILLIUNITS));
-  }
-#ifdef NOT_PRODUCT
-  inline void update_avg_remark_pause_counter() {
-    _avg_remark_pause_counter->set_value(
-      (jlong) (cms_size_policy()-> avg_remark_pause()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_initial_pause_counter() {
-    _initial_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_initial_pause()->average() *
-      (double) MILLIUNITS));
-  }
-#endif
-  inline void update_remark_pause_counter() {
-    _remark_pause_counter->set_value(
-      (jlong) (cms_size_policy()-> avg_remark_pause()->last_sample() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_concurrent_time_counter() {
-    _avg_concurrent_time_counter->set_value(
-      (jlong) (cms_size_policy()->avg_concurrent_time()->last_sample() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_concurrent_interval_counter() {
-    _avg_concurrent_interval_counter->set_value(
-      (jlong) (cms_size_policy()->avg_concurrent_interval()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_concurrent_gc_cost_counter() {
-    _avg_concurrent_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_concurrent_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_avg_cms_free_counter() {
-    _avg_cms_free_counter->set_value(
-      (jlong) cms_size_policy()->avg_cms_free()->average());
-  }
-
-  inline void update_avg_cms_free_at_sweep_counter() {
-    _avg_cms_free_at_sweep_counter->set_value(
-      (jlong) cms_size_policy()->avg_cms_free_at_sweep()->average());
-  }
-
-  inline void update_avg_cms_promo_counter() {
-    _avg_cms_promo_counter->set_value(
-      (jlong) cms_size_policy()->avg_cms_promo()->average());
-  }
-
-  inline void update_avg_old_live_counter() {
-    _avg_old_live_counter->set_value(
-      (jlong)(cms_size_policy()->avg_old_live()->average())
-    );
-  }
-
-  inline void update_avg_msc_pause_counter() {
-    _avg_msc_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_msc_pause()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_msc_interval_counter() {
-    _avg_msc_interval_counter->set_value(
-      (jlong) (cms_size_policy()->avg_msc_interval()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_msc_gc_cost_counter() {
-    _msc_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_msc_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_avg_ms_pause_counter() {
-    _avg_ms_pause_counter->set_value(
-      (jlong) (cms_size_policy()->avg_ms_pause()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_avg_ms_interval_counter() {
-    _avg_ms_interval_counter->set_value(
-      (jlong) (cms_size_policy()->avg_ms_interval()->average() *
-      (double) MILLIUNITS));
-  }
-
-  inline void update_ms_gc_cost_counter() {
-    _ms_gc_cost_counter->set_value(
-      (jlong) (cms_size_policy()->avg_ms_gc_cost()->average() * 100.0));
-  }
-
-  inline void update_major_gc_cost_counter() {
-    _major_gc_cost_counter->set_value(
-      (jlong)(cms_size_policy()->cms_gc_cost() * 100.0)
-    );
-  }
-  inline void update_mutator_cost_counter() {
-    _mutator_cost_counter->set_value(
-      (jlong)(cms_size_policy()->mutator_cost() * 100.0)
-    );
-  }
-
-  inline void update_avg_promoted_avg(CMSGCStats* gc_stats) {
-    _promoted_avg_counter->set_value(
-      (jlong)(gc_stats->avg_promoted()->average())
-    );
-  }
-  inline void update_avg_promoted_dev(CMSGCStats* gc_stats) {
-    _promoted_avg_dev_counter->set_value(
-      (jlong)(gc_stats->avg_promoted()->deviation())
-    );
-  }
-  inline void update_avg_promoted_padded_avg(CMSGCStats* gc_stats) {
-    _promoted_padded_avg_counter->set_value(
-      (jlong)(gc_stats->avg_promoted()->padded_average())
-    );
-  }
-  inline void update_remark_pause_old_slope_counter() {
-    _remark_pause_old_slope_counter->set_value(
-      (jlong)(cms_size_policy()->remark_pause_old_slope() * 1000)
-    );
-  }
-  inline void update_initial_pause_old_slope_counter() {
-    _initial_pause_old_slope_counter->set_value(
-      (jlong)(cms_size_policy()->initial_pause_old_slope() * 1000)
-    );
-  }
-  inline void update_remark_pause_young_slope_counter() {
-    _remark_pause_young_slope_counter->set_value(
-      (jlong)(cms_size_policy()->remark_pause_young_slope() * 1000)
-    );
-  }
-  inline void update_initial_pause_young_slope_counter() {
-    _initial_pause_young_slope_counter->set_value(
-      (jlong)(cms_size_policy()->initial_pause_young_slope() * 1000)
-    );
-  }
-  inline void update_change_young_gen_for_maj_pauses() {
-    _change_young_gen_for_maj_pauses_counter->set_value(
-      cms_size_policy()->change_young_gen_for_maj_pauses());
-  }
-
- public:
-  CMSGCAdaptivePolicyCounters(const char* name, int collectors, int generations,
-                              AdaptiveSizePolicy* size_policy);
-
-  // update counters
-  void update_counters();
-  void update_counters(CMSGCStats* gc_stats);
-  void update_counters_from_policy();
-
-  inline void update_cms_capacity_counter(size_t size_in_bytes) {
-    _cms_capacity_counter->set_value(size_in_bytes);
-  }
-
-  virtual GCPolicyCounters::Name kind() const {
-    return GCPolicyCounters::CMSGCAdaptivePolicyCountersKind;
-  }
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSGCADAPTIVEPOLICYCOUNTERS_HPP
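
Most counters in the header deleted above mirror AdaptiveWeightedAverage values. The underlying average is a plain exponential decay; a sketch, assuming the VM's convention of an integer weight given in percent:

// Exponentially weighted average: each sample contributes weight percent
// of the new value, so with weight 25 the average moves a quarter of the
// way toward every sample.
class WeightedAverage {
  double _avg = 0.0;
  unsigned _weight;  // in percent
 public:
  explicit WeightedAverage(unsigned weight_percent) : _weight(weight_percent) {}
  void sample(double value) {
    _avg = ((100.0 - _weight) * _avg + _weight * value) / 100.0;
  }
  double average() const { return _avg; }
};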
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -70,7 +70,6 @@
 class CompactibleFreeListSpace: public CompactibleSpace {
   friend class VMStructs;
   friend class ConcurrentMarkSweepGeneration;
-  friend class ASConcurrentMarkSweepGeneration;
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -27,9 +27,8 @@
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
@@ -319,26 +318,12 @@
   }
 }
 
-CMSAdaptiveSizePolicy* CMSCollector::size_policy() {
+AdaptiveSizePolicy* CMSCollector::size_policy() {
   GenCollectedHeap* gch = GenCollectedHeap::heap();
   assert(gch->kind() == CollectedHeap::GenCollectedHeap,
     "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp;
-}
-
-CMSGCAdaptivePolicyCounters* CMSCollector::gc_adaptive_policy_counters() {
-  CMSGCAdaptivePolicyCounters* results =
-    (CMSGCAdaptivePolicyCounters*) collector_policy()->counters();
-  assert(
-    results->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong gc policy counter kind");
-  return results;
-}
-
+  return gch->gen_policy()->size_policy();
+}
 
 void ConcurrentMarkSweepGeneration::initialize_performance_counters() {
 
@@ -2031,11 +2016,6 @@
       "collections passed to foreground collector", _full_gcs_since_conc_gc);
   }
 
-  // Sample collection interval time and reset for collection pause.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_begin();
-  }
-
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
   MemRegion new_span(GenCollectedHeap::heap()->reserved_region());
@@ -2111,11 +2091,6 @@
   _inter_sweep_timer.reset();
   _inter_sweep_timer.start();
 
-  // Sample collection pause time and reset for collection interval.
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->msc_collection_end(gch->gc_cause());
-  }
-
   gc_timer->register_gc_end();
 
   gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
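
The msc_collection_begin()/msc_collection_end() calls removed in these hunks sampled a pause timer and an interval timer around each collection, and the two samples together yield the dimensionless GC cost used by the policy. A standalone sketch of that sampling pattern (the names and the steady_clock choice are illustrative):

#include <chrono>

using Clock = std::chrono::steady_clock;

// One mark separates alternating interval (mutator) and pause (GC) spans;
// cost is pause / (pause + interval).
struct PhaseStats {
  Clock::time_point mark = Clock::now();
  double last_pause_sec = 0.0;
  double last_interval_sec = 0.0;

  static double since(Clock::time_point t) {
    return std::chrono::duration<double>(Clock::now() - t).count();
  }
  void collection_begin() {   // closes the mutator interval
    last_interval_sec = since(mark);
    mark = Clock::now();
  }
  void collection_end() {     // closes the pause
    last_pause_sec = since(mark);
    mark = Clock::now();
  }
  double gc_cost() const {    // fraction of wall time spent paused
    double total = last_pause_sec + last_interval_sec;
    return total > 0.0 ? last_pause_sec / total : 0.0;
  }
};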
@@ -2373,26 +2348,14 @@
         }
         break;
       case Precleaning:
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_begin();
-        }
         // marking from roots in markFromRoots has been completed
         preclean();
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-        }
         assert(_collectorState == AbortablePreclean ||
                _collectorState == FinalMarking,
                "Collector state should have changed");
         break;
       case AbortablePreclean:
-        if (UseAdaptiveSizePolicy) {
-        size_policy()->concurrent_phases_resume();
-        }
         abortable_preclean();
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_precleaning_end();
-        }
         assert(_collectorState == FinalMarking, "Collector state should "
           "have changed");
         break;
@@ -2406,23 +2369,12 @@
         assert(_foregroundGCShouldWait, "block post-condition");
         break;
       case Sweeping:
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_begin();
-        }
         // final marking in checkpointRootsFinal has been completed
         sweep(true);
         assert(_collectorState == Resizing, "Collector state change "
           "to Resizing must be done under the free_list_lock");
         _full_gcs_since_conc_gc = 0;
 
-        // Stop the timers for adaptive size policy for the concurrent phases
-        if (UseAdaptiveSizePolicy) {
-          size_policy()->concurrent_sweeping_end();
-          size_policy()->concurrent_phases_end(gch->gc_cause(),
-                                             gch->prev_gen(_cmsGen)->capacity(),
-                                             _cmsGen->free());
-        }
-
       case Resizing: {
         // Sweeping has been completed...
         // At this point the background collection has completed.
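
For orientation, the switch these hunks edit drives the CMS background cycle through its phases. The states below paraphrase CMSCollector's CollectorState, listed in cycle order rather than declaration order, purely as a reading aid:

    enum CollectorState {
      Idling,             // no cycle in progress
      InitialMarking,     // STW: mark objects directly reachable from roots
      Marking,            // concurrent marking from the initial-mark roots
      Precleaning,        // concurrently re-examine dirtied cards
      AbortablePreclean,  // preclean until a scavenge or a time/work cap
      FinalMarking,       // STW remark: catch up with mutator updates
      Sweeping,           // concurrent sweep onto the free lists
      Resizing,           // possibly adjust generation size
      Resetting           // clear marking state for the next cycle
    };

Note that case Sweeping deliberately falls through into case Resizing: once sweeping completes, the resize step runs immediately.
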
@@ -2539,9 +2491,6 @@
   const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
   NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
     true, NULL, gc_id);)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->ms_collection_begin();
-  }
   COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
 
   HandleMark hm;  // Discard invalid handles created during verification
@@ -2633,11 +2582,6 @@
     }
   }
 
-  if (UseAdaptiveSizePolicy) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    size_policy()->ms_collection_end(gch->gc_cause());
-  }
-
   if (VerifyAfterGC &&
       GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
     Universe::verify();
@@ -3687,9 +3631,6 @@
 
   NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
     PrintGCDetails && Verbose, true, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_begin();
-  }
 
   // Reset all the PLAB chunk arrays if necessary.
   if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
@@ -3769,9 +3710,6 @@
   // Save the end of the used_region of the constituent generations
   // to be used to limit the extent of sweep in each generation.
   save_sweep_limits();
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_initial_end(gch->gc_cause());
-  }
   verify_overflow_empty();
 }
 
@@ -3788,15 +3726,6 @@
 
   bool res;
   if (asynch) {
-
-    // Start the timers for adaptive size policy for the concurrent phases
-    // Do it here so that the foreground MS can use the concurrent
-    // timer since a foreground MS might have the sweep done concurrently
-    // or STW.
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_begin();
-    }
-
     // Weak ref discovery note: We may be discovering weak
     // refs in this generation concurrent (but interleaved) with
     // weak ref discovery by a younger generation collector.
@@ -3814,22 +3743,12 @@
         gclog_or_tty->print_cr("bailing out to foreground collection");
       }
     }
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->concurrent_marking_end();
-    }
   } else {
     assert(SafepointSynchronize::is_at_safepoint(),
            "inconsistent with asynch == false");
-    if (UseAdaptiveSizePolicy) {
-      size_policy()->ms_collection_marking_begin();
-    }
     // already have locks
     res = markFromRootsWork(asynch);
     _collectorState = FinalMarking;
-    if (UseAdaptiveSizePolicy) {
-      GenCollectedHeap* gch = GenCollectedHeap::heap();
-      size_policy()->ms_collection_marking_end(gch->gc_cause());
-    }
   }
   verify_overflow_empty();
   return res;
@@ -4705,8 +4624,7 @@
 
   if (clean_survivor) {  // preclean the active survivor space(s)
     assert(_young_gen->kind() == Generation::DefNew ||
-           _young_gen->kind() == Generation::ParNew ||
-           _young_gen->kind() == Generation::ASParNew,
+           _young_gen->kind() == Generation::ParNew,
          "incorrect type for cast");
     DefNewGeneration* dng = (DefNewGeneration*)_young_gen;
     PushAndMarkClosure pam_cl(this, _span, ref_processor(),
@@ -5077,10 +4995,6 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_final_begin();
-  }
-
   ResourceMark rm;
   HandleMark   hm;
 
@@ -5214,9 +5128,6 @@
       "Should be clear by end of the final marking");
   assert(_ct->klass_rem_set()->mod_union_is_clear(),
       "Should be clear by end of the final marking");
-  if (UseAdaptiveSizePolicy) {
-    size_policy()->checkpoint_roots_final_end(gch->gc_cause());
-  }
 }
 
 void CMSParInitialMarkTask::work(uint worker_id) {
@@ -6329,7 +6240,6 @@
 
   _inter_sweep_timer.stop();
   _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
-  size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
 
   assert(!_intra_sweep_timer.is_active(), "Should not be active");
   _intra_sweep_timer.reset();
@@ -6454,17 +6364,6 @@
   }
 }
 
-CMSAdaptiveSizePolicy* ConcurrentMarkSweepGeneration::size_policy() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "Wrong type of heap");
-  CMSAdaptiveSizePolicy* sp = (CMSAdaptiveSizePolicy*)
-    gch->gen_policy()->size_policy();
-  assert(sp->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-  return sp;
-}
-
 void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
@@ -6540,9 +6439,6 @@
 // Reset CMS data structures (for now just the marking bit map)
 // preparatory for the next cycle.
 void CMSCollector::reset(bool asynch) {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* sp = size_policy();
-  AdaptiveSizePolicyOutput(sp, gch->total_collections());
   if (asynch) {
     CMSTokenSyncWithLocks ts(true, bitMapLock());
 
@@ -6597,7 +6493,7 @@
     // Because only the full (i.e., concurrent mode failure) collections
     // are being measured for gc overhead limits, clean the "near" flag
     // and count.
-    sp->reset_gc_overhead_limit_count();
+    size_policy()->reset_gc_overhead_limit_count();
     _collectorState = Idling;
   } else {
     // already have the lock
@@ -7064,7 +6960,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7225,7 +7120,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7298,7 +7192,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -7457,7 +7350,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -8099,7 +7991,6 @@
   ConcurrentMarkSweepThread::acknowledge_yield_request();
 
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -8780,7 +8671,6 @@
   ConcurrentMarkSweepThread::desynchronize(true);
   ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
-  GCPauseTimer p(_collector->size_policy()->concurrent_timer_ptr());
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
@@ -9327,172 +9217,6 @@
 }
 #endif
 
-CMSAdaptiveSizePolicy* ASConcurrentMarkSweepGeneration::cms_size_policy() const
-{
-  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
-  CMSAdaptiveSizePolicy* size_policy =
-    (CMSAdaptiveSizePolicy*) gch->gen_policy()->size_policy();
-  assert(size_policy->is_gc_cms_adaptive_size_policy(),
-    "Wrong type for size policy");
-  return size_policy;
-}
-
-void ASConcurrentMarkSweepGeneration::resize(size_t cur_promo_size,
-                                           size_t desired_promo_size) {
-  if (cur_promo_size < desired_promo_size) {
-    size_t expand_bytes = desired_promo_size - cur_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-        "Expanding tenured generation by " SIZE_FORMAT " (bytes)",
-        expand_bytes);
-    }
-    expand(expand_bytes,
-           MinHeapDeltaBytes,
-           CMSExpansionCause::_adaptive_size_policy);
-  } else if (desired_promo_size < cur_promo_size) {
-    size_t shrink_bytes = cur_promo_size - desired_promo_size;
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr(" ASConcurrentMarkSweepGeneration::resize "
-        "Shrinking tenured generation by " SIZE_FORMAT " (bytes)",
-        shrink_bytes);
-    }
-    shrink(shrink_bytes);
-  }
-}
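
The deleted resize() above chooses between expanding and shrinking by the difference between the current and desired promotion-area sizes. An illustrative reduction of that decision, with invented names:

    #include <cstddef>

    void resize_generation(size_t cur_bytes, size_t desired_bytes,
                           void (*expand)(size_t), void (*shrink)(size_t)) {
      if (cur_bytes < desired_bytes) {
        expand(desired_bytes - cur_bytes);  // grow by the shortfall
      } else if (desired_bytes < cur_bytes) {
        shrink(cur_bytes - desired_bytes);  // give back the excess
      }                                     // equal: nothing to do
    }
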
-
-CMSGCAdaptivePolicyCounters* ASConcurrentMarkSweepGeneration::gc_adaptive_policy_counters() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  CMSGCAdaptivePolicyCounters* counters =
-    (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
-  assert(counters->kind() == GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-    "Wrong kind of counters");
-  return counters;
-}
-
-
-void ASConcurrentMarkSweepGeneration::update_counters() {
-  if (UsePerfData) {
-    _space_counters->update_all();
-    _gen_counters->update_all();
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::update_counters(size_t used) {
-  if (UsePerfData) {
-    _space_counters->update_used(used);
-    _space_counters->update_capacity();
-    _gen_counters->update_all();
-
-    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    CMSGCStats* gc_stats_l = (CMSGCStats*) gc_stats();
-    assert(gc_stats_l->kind() == GCStats::CMSGCStatsKind,
-      "Wrong gc statistics type");
-    counters->update_counters(gc_stats_l);
-  }
-}
-
-void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  assert_lock_strong(freelistLock());
-  HeapWord* old_end = _cmsSpace->end();
-  HeapWord* unallocated_start = _cmsSpace->unallocated_block();
-  assert(old_end >= unallocated_start, "Miscalculation of unallocated_start");
-  FreeChunk* chunk_at_end = find_chunk_at_end();
-  if (chunk_at_end == NULL) {
-    // No room to shrink
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("No room to shrink: old_end  "
-        PTR_FORMAT "  unallocated_start  " PTR_FORMAT
-        " chunk_at_end  " PTR_FORMAT,
-        old_end, unallocated_start, chunk_at_end);
-    }
-    return;
-  } else {
-
-    // Find the chunk at the end of the space and determine
-    // how much it can be shrunk.
-    size_t shrinkable_size_in_bytes = chunk_at_end->size();
-    size_t aligned_shrinkable_size_in_bytes =
-      align_size_down(shrinkable_size_in_bytes, os::vm_page_size());
-    assert(unallocated_start <= (HeapWord*) chunk_at_end->end(),
-      "Inconsistent chunk at end of space");
-    size_t bytes = MIN2(desired_bytes, aligned_shrinkable_size_in_bytes);
-    size_t word_size_before = heap_word_size(_virtual_space.committed_size());
-
-    // Shrink the underlying space
-    _virtual_space.shrink_by(bytes);
-    if (PrintGCDetails && Verbose) {
-      gclog_or_tty->print_cr("ConcurrentMarkSweepGeneration::shrink_by:"
-        " desired_bytes " SIZE_FORMAT
-        " shrinkable_size_in_bytes " SIZE_FORMAT
-        " aligned_shrinkable_size_in_bytes " SIZE_FORMAT
-        "  bytes  " SIZE_FORMAT,
-        desired_bytes, shrinkable_size_in_bytes,
-        aligned_shrinkable_size_in_bytes, bytes);
-      gclog_or_tty->print_cr("          old_end  " SIZE_FORMAT
-        "  unallocated_start  " SIZE_FORMAT,
-        old_end, unallocated_start);
-    }
-
-    // If the space did shrink (shrinking is not guaranteed),
-    // shrink the chunk at the end by the appropriate amount.
-    if (((HeapWord*)_virtual_space.high()) < old_end) {
-      size_t new_word_size =
-        heap_word_size(_virtual_space.committed_size());
-
-      // Have to remove the chunk from the dictionary because it is changing
-      // size and might be someplace elsewhere in the dictionary.
-
-      // Get the chunk at end, shrink it, and put it
-      // back.
-      _cmsSpace->removeChunkFromDictionary(chunk_at_end);
-      size_t word_size_change = word_size_before - new_word_size;
-      size_t chunk_at_end_old_size = chunk_at_end->size();
-      assert(chunk_at_end_old_size >= word_size_change,
-        "Shrink is too large");
-      chunk_at_end->set_size(chunk_at_end_old_size -
-                          word_size_change);
-      _cmsSpace->freed((HeapWord*) chunk_at_end->end(),
-        word_size_change);
-
-      _cmsSpace->returnChunkToDictionary(chunk_at_end);
-
-      MemRegion mr(_cmsSpace->bottom(), new_word_size);
-      _bts->resize(new_word_size);  // resize the block offset shared array
-      Universe::heap()->barrier_set()->resize_covered_region(mr);
-      _cmsSpace->assert_locked();
-      _cmsSpace->set_end((HeapWord*)_virtual_space.high());
-
-      NOT_PRODUCT(_cmsSpace->dictionary()->verify());
-
-      // update the space and generation capacity counters
-      if (UsePerfData) {
-        _space_counters->update_capacity();
-        _gen_counters->update_all();
-      }
-
-      if (Verbose && PrintGCDetails) {
-        size_t new_mem_size = _virtual_space.committed_size();
-        size_t old_mem_size = new_mem_size + bytes;
-        gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
-      }
-    }
-
-    assert(_cmsSpace->unallocated_block() <= _cmsSpace->end(),
-      "Inconsistency at end of space");
-    assert(chunk_at_end->end() == (uintptr_t*) _cmsSpace->end(),
-      "Shrinking is inconsistent");
-    return;
-  }
-}
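
The deleted shrink_by() can only release memory backed by the trailing free chunk, rounded down to the VM page size and clamped to the request. A hedged sketch of just that sizing computation (page_size is assumed to be a power of two, as align_size_down requires):

    #include <algorithm>
    #include <cstddef>

    size_t shrinkable_bytes(size_t desired_bytes,
                            size_t chunk_at_end_bytes,
                            size_t page_size) {
      // align_size_down: drop the sub-page remainder of the trailing chunk
      size_t aligned = chunk_at_end_bytes & ~(page_size - 1);
      // MIN2 in HotSpot: never shrink by more than was asked for
      return std::min(desired_bytes, aligned);
    }
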
 // Transfer some number of overflowed objects to the usual marking
 // stack. Return true if some objects were transferred.
 bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -52,7 +52,7 @@
 // Concurrent mode failures are currently handled by
 // means of a sliding mark-compact.
 
-class CMSAdaptiveSizePolicy;
+class AdaptiveSizePolicy;
 class CMSConcMarkingTask;
 class CMSGCAdaptivePolicyCounters;
 class CMSTracer;
@@ -1009,8 +1009,7 @@
   void icms_wait();          // Called at yield points.
 
   // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
+  AdaptiveSizePolicy* size_policy();
 
   static void print_on_error(outputStream* st);
 
@@ -1150,9 +1149,6 @@
 
   virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
 
-  // Adaptive size policy
-  CMSAdaptiveSizePolicy* size_policy();
-
   void set_did_compact(bool v) { _did_compact = v; }
 
   bool refs_discovery_is_atomic() const { return false; }
@@ -1346,37 +1342,6 @@
   void rotate_debug_collection_type();
 };
 
-class ASConcurrentMarkSweepGeneration : public ConcurrentMarkSweepGeneration {
-
-  // Return the size policy from the heap's collector
-  // policy casted to CMSAdaptiveSizePolicy*.
-  CMSAdaptiveSizePolicy* cms_size_policy() const;
-
-  // Resize the generation based on the adaptive size
-  // policy.
-  void resize(size_t cur_promo, size_t desired_promo);
-
-  // Return the GC counters from the collector policy
-  CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
-
-  virtual void shrink_by(size_t bytes);
-
- public:
-  ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
-                                  int level, CardTableRS* ct,
-                                  bool use_adaptive_freelists,
-                                  FreeBlockDictionary<FreeChunk>::DictionaryChoice
-                                    dictionaryChoice) :
-    ConcurrentMarkSweepGeneration(rs, initial_byte_size, level, ct,
-      use_adaptive_freelists, dictionaryChoice) {}
-
-  virtual const char* short_name() const { return "ASCMS"; }
-  virtual Generation::Name kind() { return Generation::ASConcurrentMarkSweep; }
-
-  virtual void update_counters();
-  virtual void update_counters(size_t used);
-};
-
 //
 // Closures of various sorts used by CMS to accomplish its work
 //
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_INLINE_HPP
 
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 
 inline HeapWord* G1AllocRegion::allocate(HeapRegion* alloc_region,
                                          size_t word_size,
--- a/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BlockOffsetTable.inline.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -26,7 +26,8 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BLOCKOFFSETTABLE_INLINE_HPP
 
 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
-#include "gc_implementation/g1/heapRegion.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "memory/space.hpp"
 
 inline HeapWord* G1BlockOffsetTable::block_start(const void* addr) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -44,6 +44,7 @@
 #include "gc_implementation/g1/g1Log.hpp"
 #include "gc_implementation/g1/g1MarkSweep.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -63,11 +64,9 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
 #include "runtime/atomic.inline.hpp"
-#include "runtime/prefetch.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
-#include "utilities/ticks.hpp"
 
 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 
@@ -4352,7 +4351,7 @@
       !(retained_region->top() == retained_region->end()) &&
       !retained_region->is_empty() &&
       !retained_region->isHumongous()) {
-    retained_region->set_saved_mark();
+    retained_region->record_top_and_timestamp();
     // The retained region was added to the old region set when it was
     // retired. We have to remove it now, since we don't allow regions
     // we allocate to in the region sets. We'll re-add it later, when
@@ -4559,126 +4558,6 @@
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
 
-G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
-  : _g1h(g1h),
-    _refs(g1h->task_queue(queue_num)),
-    _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs(g1h->g1_barrier_set()),
-    _g1_rem(g1h->g1_rem_set()),
-    _hash_seed(17), _queue_num(queue_num),
-    _term_attempts(0),
-    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
-    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
-    _age_table(false), _scanner(g1h, this, rp),
-    _strong_roots_time(0), _term_time(0),
-    _alloc_buffer_waste(0), _undo_waste(0) {
-  // We allocate one entry more than G1YoungSurvRateNumRegions, since
-  // we "sacrifice" entry 0 to keep track of surviving bytes for
-  // non-young regions (where the age is -1).
-  // We also add a few elements at the beginning and at the end in
-  // an attempt to eliminate cache contention.
-  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
-  uint array_length = PADDING_ELEM_NUM +
-                      real_length +
-                      PADDING_ELEM_NUM;
-  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
-  if (_surviving_young_words_base == NULL)
-    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
-                          "Not enough space for young surv histo.");
-  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
-  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
-
-  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
-  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
-
-  _start = os::elapsedTime();
-}
-
-void
-G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
-{
-  st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
-                   " ------waste (KiB)------");
-  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
-                   "  total   alloc    undo");
-  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
-                   " ------- ------- -------");
-}
-
-void
-G1ParScanThreadState::print_termination_stats(int i,
-                                              outputStream* const st) const
-{
-  const double elapsed_ms = elapsed_time() * 1000.0;
-  const double s_roots_ms = strong_roots_time() * 1000.0;
-  const double term_ms    = term_time() * 1000.0;
-  st->print_cr("%3d %9.2f %9.2f %6.2f "
-               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
-               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
-               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
-               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
-               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
-               alloc_buffer_waste() * HeapWordSize / K,
-               undo_waste() * HeapWordSize / K);
-}
-
-#ifdef ASSERT
-bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
-  assert(ref != NULL, "invariant");
-  assert(UseCompressedOops, "sanity");
-  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
-  oop p = oopDesc::load_decode_heap_oop(ref);
-  assert(_g1h->is_in_g1_reserved(p),
-         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  return true;
-}
-
-bool G1ParScanThreadState::verify_ref(oop* ref) const {
-  assert(ref != NULL, "invariant");
-  if (has_partial_array_mask(ref)) {
-    // Must be in the collection set--it's already been copied.
-    oop p = clear_partial_array_mask(ref);
-    assert(_g1h->obj_in_cs(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  } else {
-    oop p = oopDesc::load_decode_heap_oop(ref);
-    assert(_g1h->is_in_g1_reserved(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
-  }
-  return true;
-}
-
-bool G1ParScanThreadState::verify_task(StarTask ref) const {
-  if (ref.is_narrow()) {
-    return verify_ref((narrowOop*) ref);
-  } else {
-    return verify_ref((oop*) ref);
-  }
-}
-#endif // ASSERT
-
-void G1ParScanThreadState::trim_queue() {
-  assert(_evac_failure_cl != NULL, "not set");
-
-  StarTask ref;
-  do {
-    // Drain the overflow stack first, so other threads can steal.
-    while (refs()->pop_overflow(ref)) {
-      deal_with_reference(ref);
-    }
-
-    while (refs()->pop_local(ref)) {
-      deal_with_reference(ref);
-    }
-  } while (!refs()->is_empty());
-}
-
-G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1,
-                                     G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(par_scan_state),
-  _worker_id(par_scan_state->queue_num()) { }
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
@@ -4701,107 +4580,6 @@
   _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
-  size_t word_sz = old->size();
-  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
-  // +1 to make the -1 indexes valid...
-  int       young_index = from_region->young_index_in_cset()+1;
-  assert( (from_region->is_young() && young_index >  0) ||
-         (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1h->g1_policy();
-  markOop m = old->mark();
-  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
-                                           : m->age();
-  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
-                                                             word_sz);
-  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
-#ifndef PRODUCT
-  // Should this evacuation fail?
-  if (_g1h->evacuation_should_fail()) {
-    if (obj_ptr != NULL) {
-      undo_allocation(alloc_purpose, obj_ptr, word_sz);
-      obj_ptr = NULL;
-    }
-  }
-#endif // !PRODUCT
-
-  if (obj_ptr == NULL) {
-    // This will either forward-to-self, or detect that someone else has
-    // installed a forwarding pointer.
-    return _g1h->handle_evacuation_failure_par(this, old);
-  }
-
-  oop obj = oop(obj_ptr);
-
-  // We're going to allocate linearly, so might as well prefetch ahead.
-  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
-
-  oop forward_ptr = old->forward_to_atomic(obj);
-  if (forward_ptr == NULL) {
-    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
-
-    // alloc_purpose is just a hint to allocate() above, recheck the type of region
-    // we actually allocated from and update alloc_purpose accordingly
-    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
-    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
-    if (g1p->track_object_age(alloc_purpose)) {
-      // We could simply do obj->incr_age(). However, this causes a
-      // performance issue. obj->incr_age() will first check whether
-      // the object has a displaced mark by checking its mark word;
-      // getting the mark word from the new location of the object
-      // stalls. So, given that we already have the mark word and we
-      // are about to install it anyway, it's better to increase the
-      // age on the mark word, when the object does not have a
-      // displaced mark word. We're not expecting many objects to have
-      // a displaced mark word, so that case is not optimized
-      // further (it could be...) and we simply call obj->incr_age().
-
-      if (m->has_displaced_mark_helper()) {
-        // in this case, we have to install the mark word first,
-        // otherwise obj looks to be forwarded (the old mark word,
-        // which contains the forward pointer, was copied)
-        obj->set_mark(m);
-        obj->incr_age();
-      } else {
-        m = m->incr_age();
-        obj->set_mark(m);
-      }
-      age_table()->add(obj, word_sz);
-    } else {
-      obj->set_mark(m);
-    }
-
-    if (G1StringDedup::is_enabled()) {
-      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
-                                             to_region->is_young(),
-                                             queue_num(),
-                                             obj);
-    }
-
-    size_t* surv_young_words = surviving_young_words();
-    surv_young_words[young_index] += word_sz;
-
-    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
-      // We keep track of the next start index in the length field of
-      // the to-space object. The actual length can be found in the
-      // length field of the from-space object.
-      arrayOop(obj)->set_length(0);
-      oop* old_p = set_partial_array_mask(old);
-      push_on_queue(old_p);
-    } else {
-      // No point in using the slower heap_region_containing() method,
-      // given that we know obj is in the heap.
-      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
-      obj->oop_iterate_backwards(&_scanner);
-    }
-  } else {
-    undo_allocation(alloc_purpose, obj_ptr, word_sz);
-    obj = forward_ptr;
-  }
-  return obj;
-}
-
 template <class T>
 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
   if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
@@ -4891,24 +4669,10 @@
 }
 
 void G1ParEvacuateFollowersClosure::do_void() {
-  StarTask stolen_task;
   G1ParScanThreadState* const pss = par_scan_state();
   pss->trim_queue();
-
   do {
-    while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
-      assert(pss->verify_task(stolen_task), "sanity");
-      if (stolen_task.is_narrow()) {
-        pss->deal_with_reference((narrowOop*) stolen_task);
-      } else {
-        pss->deal_with_reference((oop*) stolen_task);
-      }
-
-      // We've just processed a reference and we might have made
-      // available new entries on the queues. So we have to make sure
-      // we drain the queues as necessary.
-      pss->trim_queue();
-    }
+    pss->steal_and_trim_queue(queues());
   } while (!offer_termination());
 }
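
After this hunk, do_void() delegates the work-stealing loop to steal_and_trim_queue() instead of spelling it out inline. The shape of the protocol, sketched with stand-in callables rather than G1's queue types:

    #include <functional>

    // Steal from peers (each steal also drains the local queue), then
    // offer termination; a failed offer means a peer produced new work.
    void evacuate_followers(const std::function<bool()>& steal_and_trim,
                            const std::function<bool()>& offer_termination) {
      do {
        while (steal_and_trim()) {
          // keep stealing: processed tasks may have enqueued more work
        }
      } while (!offer_termination());
    }
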
 
@@ -4954,8 +4718,7 @@
   }
 
 public:
-  G1ParTask(G1CollectedHeap* g1h,
-            RefToScanQueueSet *task_queues)
+  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
     : AbstractGangTask("G1 collection"),
       _g1h(g1h),
       _queues(task_queues),
@@ -5053,7 +4816,7 @@
         pss.print_termination_stats(worker_id);
       }
 
-      assert(pss.refs()->is_empty(), "should be empty");
+      assert(pss.queue_is_empty(), "should be empty");
 
       // Close the inner scope so that the ResourceMark and HandleMark
       // destructors are executed here and are included as part of the
@@ -5577,8 +5340,7 @@
 
     pss.set_evac_failure_closure(&evac_failure_cl);
 
-    assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
-
+    assert(pss.queue_is_empty(), "both queue and overflow should be empty");
 
     G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, &pss, NULL);
 
@@ -5632,7 +5394,7 @@
     G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
     drain_queue.do_void();
     // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
-    assert(pss.refs()->is_empty(), "should be");
+    assert(pss.queue_is_empty(), "should be");
   }
 };
 
@@ -5699,7 +5461,7 @@
 
   pss.set_evac_failure_closure(&evac_failure_cl);
 
-  assert(pss.refs()->is_empty(), "pre-condition");
+  assert(pss.queue_is_empty(), "pre-condition");
 
   G1ParScanExtRootClosure        only_copy_non_heap_cl(this, &pss, NULL);
 
@@ -5747,7 +5509,7 @@
   _gc_tracer_stw->report_gc_reference_stats(stats);
 
   // We have completed copying any necessary live referent objects.
-  assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+  assert(pss.queue_is_empty(), "both queue and overflow should be empty");
 
   double ref_proc_time = os::elapsedTime() - ref_proc_start;
   g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
@@ -6603,7 +6365,7 @@
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
-      new_alloc_region->set_saved_mark();
+      new_alloc_region->record_top_and_timestamp();
       if (survivor) {
         new_alloc_region->set_survivor();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -31,7 +31,6 @@
 #include "gc_implementation/g1/g1BiasedArray.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
-#include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
@@ -1715,256 +1714,4 @@
   }
 };
 
-class G1ParScanThreadState : public StackObj {
-protected:
-  G1CollectedHeap* _g1h;
-  RefToScanQueue*  _refs;
-  DirtyCardQueue   _dcq;
-  G1SATBCardTableModRefBS* _ct_bs;
-  G1RemSet* _g1_rem;
-
-  G1ParGCAllocBuffer  _surviving_alloc_buffer;
-  G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
-  ageTable            _age_table;
-
-  G1ParScanClosure    _scanner;
-
-  size_t           _alloc_buffer_waste;
-  size_t           _undo_waste;
-
-  OopsInHeapRegionClosure*      _evac_failure_cl;
-
-  int  _hash_seed;
-  uint _queue_num;
-
-  size_t _term_attempts;
-
-  double _start;
-  double _start_strong_roots;
-  double _strong_roots_time;
-  double _start_term;
-  double _term_time;
-
-  // Map from young-age-index (0 == not young, 1 is youngest) to
-  // surviving words. base is what we get back from the malloc call
-  size_t* _surviving_young_words_base;
-  // this points into the array, as we use the first few entries for padding
-  size_t* _surviving_young_words;
-
-#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
-
-  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
-
-  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
-
-  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
-
-  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
-
-  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
-    // If the new value of the field points to the same region or
-    // is the to-space, we don't need to include it in the Rset updates.
-    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
-      size_t card_index = ctbs()->index_for(p);
-      // If the card hasn't been added to the buffer, do it.
-      if (ctbs()->mark_card_deferred(card_index)) {
-        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
-      }
-    }
-  }
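
The deferred_rs_update() being moved out of this header filters out references that can never need a remembered-set entry before touching the card table. A hedged reduction with stand-in types for the region, card table, and dirty card queue:

    #include <cstddef>

    template <typename Region, typename CardTable, typename Queue>
    void deferred_rs_update(Region* from, const void* new_value,
                            const void* field, CardTable& ct, Queue& dcq) {
      // Same-region pointers and pointers out of survivor regions never
      // need a remembered-set entry, so skip the card machinery entirely.
      if (!from->is_in_reserved(new_value) && !from->is_survivor()) {
        size_t card = ct.index_for(field);
        if (ct.mark_card_deferred(card)) {   // only the first marker enqueues
          dcq.enqueue(ct.byte_for_index(card));
        }
      }
    }
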
-
-public:
-  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
-
-  ~G1ParScanThreadState() {
-    retire_alloc_buffers();
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
-  }
-
-  RefToScanQueue*   refs()            { return _refs;             }
-  ageTable*         age_table()       { return &_age_table;       }
-
-  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
-    return _alloc_buffers[purpose];
-  }
-
-  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
-  size_t undo_waste() const                      { return _undo_waste; }
-
-#ifdef ASSERT
-  bool verify_ref(narrowOop* ref) const;
-  bool verify_ref(oop* ref) const;
-  bool verify_task(StarTask ref) const;
-#endif // ASSERT
-
-  template <class T> void push_on_queue(T* ref) {
-    assert(verify_ref(ref), "sanity");
-    refs()->push(ref);
-  }
-
-  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
-
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = NULL;
-    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
-
-      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-      if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
-
-      obj = alloc_buf->allocate(word_sz);
-      assert(obj != NULL, "buffer was definitely big enough...");
-    } else {
-      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-    }
-    return obj;
-  }
-
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
-    if (obj != NULL) return obj;
-    return allocate_slow(purpose, word_sz);
-  }
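
The deleted allocate_slow() only retires and refills a PLAB when the unsatisfied request is a small fraction of a fresh buffer; otherwise the object is allocated directly, so one large request does not waste a mostly-empty buffer's remainder. The threshold test, restated with illustrative names:

    #include <cstddef>

    // waste_pct plays the role of ParallelGCBufferWastePct: refilling is
    // worthwhile only if the request is under that percentage of a buffer.
    bool should_refill_plab(size_t word_sz, size_t gclab_word_size,
                            size_t waste_pct) {
      return word_sz * 100 < gclab_word_size * waste_pct;
    }
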
-
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-    if (alloc_buffer(purpose)->contains(obj)) {
-      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-             "should contain whole object");
-      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
-  }
-
-  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
-    _evac_failure_cl = evac_failure_cl;
-  }
-  OopsInHeapRegionClosure* evac_failure_closure() {
-    return _evac_failure_cl;
-  }
-
-  int* hash_seed() { return &_hash_seed; }
-  uint queue_num() { return _queue_num; }
-
-  size_t term_attempts() const  { return _term_attempts; }
-  void note_term_attempt() { _term_attempts++; }
-
-  void start_strong_roots() {
-    _start_strong_roots = os::elapsedTime();
-  }
-  void end_strong_roots() {
-    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
-  }
-  double strong_roots_time() const { return _strong_roots_time; }
-
-  void start_term_time() {
-    note_term_attempt();
-    _start_term = os::elapsedTime();
-  }
-  void end_term_time() {
-    _term_time += (os::elapsedTime() - _start_term);
-  }
-  double term_time() const { return _term_time; }
-
-  double elapsed_time() const {
-    return os::elapsedTime() - _start;
-  }
-
-  static void
-    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
-  void
-    print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
-
-  size_t* surviving_young_words() {
-    // We add on to hide entry 0 which accumulates surviving words for
-    // age -1 regions (i.e. non-young ones)
-    return _surviving_young_words;
-  }
-
-private:
-  void retire_alloc_buffers() {
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap]->words_remaining();
-      add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                                 true /* end_of_gc */,
-                                                 false /* retain */);
-    }
-  }
-
-#define G1_PARTIAL_ARRAY_MASK 0x2
-
-  inline bool has_partial_array_mask(oop* ref) const {
-    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
-  }
-
-  // We never encode partial array oops as narrowOop*, so return false immediately.
-  // This allows the compiler to create optimized code when popping references from
-  // the work queue.
-  inline bool has_partial_array_mask(narrowOop* ref) const {
-    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
-    return false;
-  }
-
-  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
-  // We always encode partial arrays as regular oop, to allow the
-  // specialization for has_partial_array_mask() for narrowOops above.
-  // This means that unintentional use of this method with narrowOops is caught
-  // by the compiler.
-  inline oop* set_partial_array_mask(oop obj) const {
-    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline oop clear_partial_array_mask(oop* ref) const {
-    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
-  }
-
-  inline void do_oop_partial_array(oop* p);
-
-  // This method is applied to the fields of the objects that have just been copied.
-  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
-    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-           "Reference should not be NULL here as such are never pushed to the task queue.");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
-
-    // Although we never intentionally push references outside of the collection
-    // set, due to (benign) races in the claim mechanism during RSet scanning more
-    // than one thread might claim the same card. So the same card may be
-    // processed multiple times. So redo this check.
-    if (_g1h->in_cset_fast_test(obj)) {
-      oop forwardee;
-      if (obj->is_forwarded()) {
-        forwardee = obj->forwardee();
-      } else {
-        forwardee = copy_to_survivor_space(obj);
-      }
-      assert(forwardee != NULL, "forwardee should not be NULL");
-      oopDesc::encode_store_heap_oop(p, forwardee);
-    }
-
-    assert(obj != NULL, "Must be");
-    update_rs(from, p, queue_num());
-  }
-public:
-
-  oop copy_to_survivor_space(oop const obj);
-
-  template <class T> inline void deal_with_reference(T* ref_to_scan);
-
-  inline void deal_with_reference(StarTask ref);
-
-public:
-  void trim_queue();
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -29,7 +29,6 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
-#include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
@@ -289,89 +288,4 @@
   return is_obj_ill(obj, heap_region_containing(obj));
 }
 
-template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
-  if (!from->is_survivor()) {
-    _g1_rem->par_write_ref(from, p, tid);
-  }
-}
-
-template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
-  if (G1DeferredRSUpdate) {
-    deferred_rs_update(from, p, tid);
-  } else {
-    immediate_rs_update(from, p, tid);
-  }
-}
-
-
-inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
-  assert(has_partial_array_mask(p), "invariant");
-  oop from_obj = clear_partial_array_mask(p);
-
-  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
-  assert(from_obj->is_objArray(), "must be obj array");
-  objArrayOop from_obj_array = objArrayOop(from_obj);
-  // The from-space object contains the real length.
-  int length                 = from_obj_array->length();
-
-  assert(from_obj->is_forwarded(), "must be forwarded");
-  oop to_obj                 = from_obj->forwardee();
-  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
-  objArrayOop to_obj_array   = objArrayOop(to_obj);
-  // We keep track of the next start index in the length field of the
-  // to-space object.
-  int next_index             = to_obj_array->length();
-  assert(0 <= next_index && next_index < length,
-         err_msg("invariant, next index: %d, length: %d", next_index, length));
-
-  int start                  = next_index;
-  int end                    = length;
-  int remainder              = end - start;
-  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    end = start + ParGCArrayScanChunk;
-    to_obj_array->set_length(end);
-    // Push the remainder before we process the range in case another
-    // worker has run out of things to do and can steal it.
-    oop* from_obj_p = set_partial_array_mask(from_obj);
-    push_on_queue(from_obj_p);
-  } else {
-    assert(length == end, "sanity");
-    // We'll process the final range for this object. Restore the length
-    // so that the heap remains parsable in case of evacuation failure.
-    to_obj_array->set_length(end);
-  }
-  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
-  // Process indexes [start,end). It will also process the header
-  // along with the first chunk (i.e., the chunk with start == 0).
-  // Note that at this point the length field of to_obj_array is not
-  // correct given that we are using it to keep track of the next
-  // start index. oop_iterate_range() (thankfully!) ignores the length
-  // field and only relies on the start / end parameters.  It does
-  // however return the size of the object which will be incorrect. So
-  // we have to ignore it even if we wanted to use it.
-  to_obj_array->oop_iterate_range(&_scanner, start, end);
-}
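
The removed helpers above (and their declarations later in this patch) encode "continue scanning this array" tasks by tagging bit 1 of the queue entry, which is safe because real oop pointers are at least word aligned. A minimal model of that tagging, using plain pointers instead of G1's oop types:

    #include <cassert>
    #include <cstdint>

    constexpr std::uintptr_t PARTIAL_ARRAY_MASK = 0x2;

    void* set_partial_array_mask(void* obj) {
      // alignment guarantees the tag bit of a genuine pointer is clear
      assert((reinterpret_cast<std::uintptr_t>(obj) & PARTIAL_ARRAY_MASK) == 0);
      return reinterpret_cast<void*>(
          reinterpret_cast<std::uintptr_t>(obj) | PARTIAL_ARRAY_MASK);
    }

    bool has_partial_array_mask(const void* ref) {
      return (reinterpret_cast<std::uintptr_t>(ref) & PARTIAL_ARRAY_MASK) != 0;
    }

    void* clear_partial_array_mask(void* ref) {
      return reinterpret_cast<void*>(
          reinterpret_cast<std::uintptr_t>(ref) & ~PARTIAL_ARRAY_MASK);
    }
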
-
-template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
-  if (!has_partial_array_mask(ref_to_scan)) {
-    // Note: we can use "raw" versions of "region_containing" because
-    // "obj_to_scan" is definitely in the heap, and is not in a
-    // humongous region.
-    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-    do_oop_evac(ref_to_scan, r);
-  } else {
-    do_oop_partial_array((oop*)ref_to_scan);
-  }
-}
-
-inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
-  assert(verify_task(ref), "sanity");
-  if (ref.is_narrow()) {
-    deal_with_reference((narrowOop*)ref);
-  } else {
-    deal_with_reference((oop*)ref);
-  }
-}
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -25,7 +25,28 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.hpp"
 
 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
   G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
   _cm(_g1->concurrent_mark()) {}
+
+G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
+  _g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
+
+G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+  _g1(g1), _par_scan_state(NULL),
+  _worker_id(UINT_MAX) {
+  set_par_scan_thread_state(par_scan_state);
+}
+
+void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
+  assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
+  assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
+
+  _par_scan_state = par_scan_state;
+  _worker_id = par_scan_state->queue_num();
+
+  assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
+         err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
+}
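
G1ParClosureSuper now supports construction without a per-thread state; set_par_scan_thread_state() wires it in later, and its asserts turn a double-wire or an out-of-range worker id into a debug-build failure. A hedged usage sketch (the surrounding setup is invented for illustration):

    // G1ParScanClosure scanner(g1h, rp);        // _par_scan_state still NULL
    // G1ParScanThreadState pss(g1h, worker_id, rp);
    // scanner.set_par_scan_thread_state(&pss);  // must happen exactly once

This two-phase wiring breaks the construction-order cycle between the closure and G1ParScanThreadState, whose constructor (added below in the new g1ParScanThreadState.cpp) performs the wiring on its embedded scanner itself.
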
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -51,8 +51,13 @@
   G1ParScanThreadState* _par_scan_state;
   uint _worker_id;
 public:
+  // Initializes the instance, leaving _par_scan_state uninitialized. Must be done
+  // later using the set_par_scan_thread_state() method.
+  G1ParClosureSuper(G1CollectedHeap* g1);
   G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
   bool apply_to_weak_ref_discovered_field() { return true; }
+
+  void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
 };
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
@@ -68,9 +73,8 @@
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
-    G1ParClosureSuper(g1, par_scan_state)
-  {
+  G1ParScanClosure(G1CollectedHeap* g1, ReferenceProcessor* rp) :
+    G1ParClosureSuper(g1) {
     assert(_ref_processor == NULL, "sanity");
     _ref_processor = rp;
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -28,6 +28,7 @@
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1OopClosures.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/g1RemSet.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1OopClosures.inline.hpp"
+#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "oops/oop.pcgc.inline.hpp"
+#include "runtime/prefetch.inline.hpp"
+
+G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
+  : _g1h(g1h),
+    _refs(g1h->task_queue(queue_num)),
+    _dcq(&g1h->dirty_card_queue_set()),
+    _ct_bs(g1h->g1_barrier_set()),
+    _g1_rem(g1h->g1_rem_set()),
+    _hash_seed(17), _queue_num(queue_num),
+    _term_attempts(0),
+    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
+    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
+    _age_table(false), _scanner(g1h, rp),
+    _strong_roots_time(0), _term_time(0),
+    _alloc_buffer_waste(0), _undo_waste(0) {
+  _scanner.set_par_scan_thread_state(this);
+  // We allocate one entry more than G1YoungSurvRateNumRegions, since
+  // we "sacrifice" entry 0 to keep track of surviving bytes for
+  // non-young regions (where the age is -1).
+  // We also add a few elements at the beginning and at the end in
+  // an attempt to eliminate cache contention.
+  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
+  uint array_length = PADDING_ELEM_NUM +
+                      real_length +
+                      PADDING_ELEM_NUM;
+  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
+  if (_surviving_young_words_base == NULL)
+    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
+                          "Not enough space for young surv histo.");
+  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
+  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
+
+  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
+  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
+
+  _start = os::elapsedTime();
+}
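
The constructor pads the surviving-young-words array with a cache line's worth of elements on each side; PADDING_ELEM_NUM is DEFAULT_CACHE_LINE_SIZE divided by the element size. A small sketch of that arithmetic, assuming a 64-byte line purely for illustration:

    #include <cstddef>

    constexpr size_t CACHE_LINE_BYTES = 64;  // stand-in for DEFAULT_CACHE_LINE_SIZE
    constexpr size_t PADDING_ELEM_NUM = CACHE_LINE_BYTES / sizeof(size_t);

    size_t padded_length(size_t real_length) {
      // slack on both sides keeps this thread's counters from sharing a
      // cache line with neighbouring allocations (false sharing)
      return PADDING_ELEM_NUM + real_length + PADDING_ELEM_NUM;
    }

The usable array then starts PADDING_ELEM_NUM elements past the malloc'ed base, exactly as _surviving_young_words is computed above.
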
+
+G1ParScanThreadState::~G1ParScanThreadState() {
+  retire_alloc_buffers();
+  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
+}
+
+void
+G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
+{
+  st->print_raw_cr("GC Termination Stats");
+  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
+                   " ------waste (KiB)------");
+  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
+                   "  total   alloc    undo");
+  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
+                   " ------- ------- -------");
+}
+
+void
+G1ParScanThreadState::print_termination_stats(int i,
+                                              outputStream* const st) const
+{
+  const double elapsed_ms = elapsed_time() * 1000.0;
+  const double s_roots_ms = strong_roots_time() * 1000.0;
+  const double term_ms    = term_time() * 1000.0;
+  st->print_cr("%3d %9.2f %9.2f %6.2f "
+               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
+               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
+               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
+               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
+               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
+               alloc_buffer_waste() * HeapWordSize / K,
+               undo_waste() * HeapWordSize / K);
+}
+
+#ifdef ASSERT
+bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
+  assert(ref != NULL, "invariant");
+  assert(UseCompressedOops, "sanity");
+  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
+  oop p = oopDesc::load_decode_heap_oop(ref);
+  assert(_g1h->is_in_g1_reserved(p),
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  return true;
+}
+
+bool G1ParScanThreadState::verify_ref(oop* ref) const {
+  assert(ref != NULL, "invariant");
+  if (has_partial_array_mask(ref)) {
+    // Must be in the collection set--it's already been copied.
+    oop p = clear_partial_array_mask(ref);
+    assert(_g1h->obj_in_cs(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  } else {
+    oop p = oopDesc::load_decode_heap_oop(ref);
+    assert(_g1h->is_in_g1_reserved(p),
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+  }
+  return true;
+}
+
+bool G1ParScanThreadState::verify_task(StarTask ref) const {
+  if (ref.is_narrow()) {
+    return verify_ref((narrowOop*) ref);
+  } else {
+    return verify_ref((oop*) ref);
+  }
+}
+#endif // ASSERT
+
+void G1ParScanThreadState::trim_queue() {
+  assert(_evac_failure_cl != NULL, "not set");
+
+  StarTask ref;
+  do {
+    // Drain the overflow stack first, so other threads can steal.
+    while (_refs->pop_overflow(ref)) {
+      dispatch_reference(ref);
+    }
+
+    while (_refs->pop_local(ref)) {
+      dispatch_reference(ref);
+    }
+  } while (!_refs->is_empty());
+}
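// Aside: the drain discipline of trim_queue() sketched with standard
// containers (single-threaded stand-ins for the lock-free task queue; all
// names are illustrative). Overflow entries are drained first so that, in the
// parallel version, idle workers can steal from the local queue sooner; the
// outer loop repeats because processing an entry may push new ones.
#include <deque>
#include <vector>

template <typename Task, typename Process>
void trim(std::deque<Task>& local, std::vector<Task>& overflow, Process process) {
  do {
    while (!overflow.empty()) {
      Task t = overflow.back();
      overflow.pop_back();
      process(t);  // may push onto either container
    }
    while (!local.empty()) {
      Task t = local.back();
      local.pop_back();
      process(t);
    }
  } while (!local.empty() || !overflow.empty());
}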
+
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
+  size_t word_sz = old->size();
+  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+  // Add 1 so that the -1 index used by non-young regions becomes a valid index (0).
+  int       young_index = from_region->young_index_in_cset()+1;
+  assert( (from_region->is_young() && young_index >  0) ||
+         (!from_region->is_young() && young_index == 0), "invariant" );
+  G1CollectorPolicy* g1p = _g1h->g1_policy();
+  markOop m = old->mark();
+  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
+                                           : m->age();
+  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
+                                                             word_sz);
+  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
+#ifndef PRODUCT
+  // Should this evacuation fail?
+  if (_g1h->evacuation_should_fail()) {
+    if (obj_ptr != NULL) {
+      undo_allocation(alloc_purpose, obj_ptr, word_sz);
+      obj_ptr = NULL;
+    }
+  }
+#endif // !PRODUCT
+
+  if (obj_ptr == NULL) {
+    // This will either forward-to-self, or detect that someone else has
+    // installed a forwarding pointer.
+    return _g1h->handle_evacuation_failure_par(this, old);
+  }
+
+  oop obj = oop(obj_ptr);
+
+  // We're going to allocate linearly, so might as well prefetch ahead.
+  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
+
+  oop forward_ptr = old->forward_to_atomic(obj);
+  if (forward_ptr == NULL) {
+    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
+
+    // alloc_purpose is just a hint to allocate() above; recheck the type of
+    // the region we actually allocated from and update alloc_purpose accordingly.
+    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
+    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
+
+    if (g1p->track_object_age(alloc_purpose)) {
+      // We could simply do obj->incr_age(). However, this causes a
+      // performance issue. obj->incr_age() will first check whether
+      // the object has a displaced mark by checking its mark word;
+      // getting the mark word from the new location of the object
+      // stalls. So, given that we already have the mark word and we
+      // are about to install it anyway, it's better to increase the
+      // age on the mark word, when the object does not have a
+      // displaced mark word. We're not expecting many objects to have
+      // a displaced mark word, so that case is not optimized
+      // further (though it could be) and we simply call obj->incr_age().
+
+      if (m->has_displaced_mark_helper()) {
+        // In this case we have to install the mark word first;
+        // otherwise obj would appear to be forwarded (the old mark word,
+        // which contains the forwarding pointer, was copied).
+        obj->set_mark(m);
+        obj->incr_age();
+      } else {
+        m = m->incr_age();
+        obj->set_mark(m);
+      }
+      age_table()->add(obj, word_sz);
+    } else {
+      obj->set_mark(m);
+    }
+
+    if (G1StringDedup::is_enabled()) {
+      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
+                                             to_region->is_young(),
+                                             queue_num(),
+                                             obj);
+    }
+
+    size_t* surv_young_words = surviving_young_words();
+    surv_young_words[young_index] += word_sz;
+
+    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
+      // We keep track of the next start index in the length field of
+      // the to-space object. The actual length can be found in the
+      // length field of the from-space object.
+      arrayOop(obj)->set_length(0);
+      oop* old_p = set_partial_array_mask(old);
+      push_on_queue(old_p);
+    } else {
+      // No point in using the slower heap_region_containing() method,
+      // given that we know obj is in the heap.
+      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+      obj->oop_iterate_backwards(&_scanner);
+    }
+  } else {
+    undo_allocation(alloc_purpose, obj_ptr, word_sz);
+    obj = forward_ptr;
+  }
+  return obj;
+}
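// Aside: the claim step above hinges on forward_to_atomic(), which guarantees
// that exactly one thread copies each object. A standalone sketch with
// std::atomic (illustrative only; the real mark word also encodes age, lock
// state and the forwarding pointer in a single word):
#include <atomic>

struct Object {
  std::atomic<Object*> forwardee{nullptr};
};

// Returns NULL if the caller won and its copy is now the forwardee;
// otherwise returns the copy installed by the winning thread.
inline Object* forward_to_atomic(Object* from, Object* to) {
  Object* expected = nullptr;
  if (from->forwardee.compare_exchange_strong(expected, to)) {
    return nullptr;
  }
  return expected;  // loaded by the failed compare_exchange
}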
+
+HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
+  HeapWord* obj = NULL;
+  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+    alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+
+    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
+    if (buf == NULL) {
+      return NULL; // Let caller handle allocation failure.
+    }
+    // Otherwise, install the new buffer and allocate from it.
+    alloc_buf->set_word_size(gclab_word_size);
+    alloc_buf->set_buf(buf);
+
+    obj = alloc_buf->allocate(word_sz);
+    assert(obj != NULL, "buffer was definitely big enough...");
+  } else {
+    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
+  }
+  return obj;
+}
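// Aside: the refill-versus-direct-allocation decision above reduces to a
// waste bound: throw away what is left of the current buffer only if the
// request is small relative to a fresh buffer. A sketch (names assumed):
#include <cstddef>

inline bool should_refill_plab(size_t word_sz, size_t plab_word_sz,
                               size_t waste_pct) {
  // Multiply instead of dividing to avoid integer rounding:
  // equivalent to word_sz < plab_word_sz * (waste_pct / 100.0).
  return word_sz * 100 < plab_word_sz * waste_pct;
}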
+
+void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
+  if (alloc_buffer(purpose)->contains(obj)) {
+    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
+           "should contain whole object");
+    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
+  } else {
+    CollectedHeap::fill_with_object(obj, word_sz);
+    add_to_undo_waste(word_sz);
+  }
+}
+
+HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
+  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
+  if (obj != NULL) {
+    return obj;
+  }
+  return allocate_slow(purpose, word_sz);
+}
+
+void G1ParScanThreadState::retire_alloc_buffers() {
+  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+    size_t waste = _alloc_buffers[ap]->words_remaining();
+    add_to_alloc_buffer_waste(waste);
+    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+                                               true /* end_of_gc */,
+                                               false /* retain */);
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
+
+#include "gc_implementation/g1/dirtyCardQueue.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1OopClosures.hpp"
+#include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/shared/ageTable.hpp"
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+
+class HeapRegion;
+class outputStream;
+
+class G1ParScanThreadState : public StackObj {
+ private:
+  G1CollectedHeap* _g1h;
+  RefToScanQueue*  _refs;
+  DirtyCardQueue   _dcq;
+  G1SATBCardTableModRefBS* _ct_bs;
+  G1RemSet* _g1_rem;
+
+  G1ParGCAllocBuffer  _surviving_alloc_buffer;
+  G1ParGCAllocBuffer  _tenured_alloc_buffer;
+  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  ageTable            _age_table;
+
+  G1ParScanClosure    _scanner;
+
+  size_t           _alloc_buffer_waste;
+  size_t           _undo_waste;
+
+  OopsInHeapRegionClosure*      _evac_failure_cl;
+
+  int  _hash_seed;
+  uint _queue_num;
+
+  size_t _term_attempts;
+
+  double _start;
+  double _start_strong_roots;
+  double _strong_roots_time;
+  double _start_term;
+  double _term_time;
+
+  // Map from young-age-index (0 == not young, 1 is youngest) to
+  // surviving words. The base pointer is what we get back from the malloc call.
+  size_t* _surviving_young_words_base;
+  // This pointer points into that array; the first few entries are padding.
+  size_t* _surviving_young_words;
+
+#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
+
+  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
+
+  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
+
+  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
+  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
+
+  template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
+
+  template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
+    // If the new value of the field points into the same region or
+    // into the to-space (a survivor region), we don't need to include
+    // it in the RSet updates.
+    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
+      size_t card_index = ctbs()->index_for(p);
+      // If the card hasn't been added to the buffer, do it.
+      if (ctbs()->mark_card_deferred(card_index)) {
+        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
+      }
+    }
+  }
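// Aside: a sketch of the deferred card marking above in plain C++ (the
// 512-byte card size and all names here are assumptions). The updated
// field's address maps to a card index; the card is enqueued for later
// refinement at most once, guarded by a per-card flag.
#include <cstdint>
#include <vector>

static const int kCardShift = 9;  // 512-byte cards (assumed)

inline size_t card_index_for(const void* p, uintptr_t heap_base) {
  return ((uintptr_t)p - heap_base) >> kCardShift;
}

inline void mark_card_deferred(std::vector<uint8_t>& cards, size_t idx,
                               std::vector<size_t>& dirty_queue) {
  const uint8_t kDeferred = 1;
  if ((cards[idx] & kDeferred) == 0) {  // not yet buffered
    cards[idx] |= kDeferred;
    dirty_queue.push_back(idx);         // processed later, off the hot path
  }
}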
+
+ public:
+  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
+  ~G1ParScanThreadState();
+
+  ageTable*         age_table()       { return &_age_table;       }
+
+  G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
+    return _alloc_buffers[purpose];
+  }
+
+  size_t alloc_buffer_waste() const              { return _alloc_buffer_waste; }
+  size_t undo_waste() const                      { return _undo_waste; }
+
+#ifdef ASSERT
+  bool queue_is_empty() const { return _refs->is_empty(); }
+
+  bool verify_ref(narrowOop* ref) const;
+  bool verify_ref(oop* ref) const;
+  bool verify_task(StarTask ref) const;
+#endif // ASSERT
+
+  template <class T> void push_on_queue(T* ref) {
+    assert(verify_ref(ref), "sanity");
+    _refs->push(ref);
+  }
+
+  template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
+
+ private:
+
+  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
+  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
+  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
+
+ public:
+
+  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
+    _evac_failure_cl = evac_failure_cl;
+  }
+
+  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
+
+  int* hash_seed() { return &_hash_seed; }
+  uint queue_num() { return _queue_num; }
+
+  size_t term_attempts() const  { return _term_attempts; }
+  void note_term_attempt() { _term_attempts++; }
+
+  void start_strong_roots() {
+    _start_strong_roots = os::elapsedTime();
+  }
+  void end_strong_roots() {
+    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
+  }
+  double strong_roots_time() const { return _strong_roots_time; }
+
+  void start_term_time() {
+    note_term_attempt();
+    _start_term = os::elapsedTime();
+  }
+  void end_term_time() {
+    _term_time += (os::elapsedTime() - _start_term);
+  }
+  double term_time() const { return _term_time; }
+
+  double elapsed_time() const {
+    return os::elapsedTime() - _start;
+  }
+
+  static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
+  void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
+
+  size_t* surviving_young_words() {
+    // Callers add 1 to the young index, which hides entry 0; that entry
+    // accumulates surviving words for age -1 regions (i.e. non-young ones).
+    return _surviving_young_words;
+  }
+
+ private:
+  void retire_alloc_buffers();
+
+  #define G1_PARTIAL_ARRAY_MASK 0x2
+
+  inline bool has_partial_array_mask(oop* ref) const {
+    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
+  }
+
+  // We never encode partial array oops as narrowOop*, so return false immediately.
+  // This allows the compiler to create optimized code when popping references from
+  // the work queue.
+  inline bool has_partial_array_mask(narrowOop* ref) const {
+    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
+    return false;
+  }
+
+  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
+  // We always encode partial arrays as regular oops, to allow the
+  // specialization of has_partial_array_mask() for narrowOops above.
+  // This means that unintentional use of this method with narrowOops is
+  // caught by the compiler.
+  inline oop* set_partial_array_mask(oop obj) const {
+    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
+  }
+
+  inline oop clear_partial_array_mask(oop* ref) const {
+    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  }
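// Aside: the mask machinery above is ordinary pointer tagging. Object
// addresses are at least word aligned, so bit 1 is free to mean "partially
// scanned array". A standalone sketch (illustrative types, not HotSpot's):
#include <cassert>
#include <cstdint>

static const uintptr_t kPartialMask = 0x2;

inline void* tag_partial(void* obj) {
  assert(((uintptr_t)obj & kPartialMask) == 0 && "information loss");
  return (void*)((uintptr_t)obj | kPartialMask);
}
inline bool has_partial_tag(const void* p) {
  return ((uintptr_t)p & kPartialMask) != 0;
}
inline void* clear_partial_tag(void* p) {
  return (void*)((uintptr_t)p & ~kPartialMask);
}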
+
+  inline void do_oop_partial_array(oop* p);
+
+  // This method is applied to the fields of the objects that have just been copied.
+  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
+
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
+
+  inline void dispatch_reference(StarTask ref);
+ public:
+
+  oop copy_to_survivor_space(oop const obj);
+
+  void trim_queue();
+
+  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
+
+#include "gc_implementation/g1/g1ParScanThreadState.hpp"
+#include "gc_implementation/g1/g1RemSet.inline.hpp"
+#include "oops/oop.inline.hpp"
+
+template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
+  if (!from->is_survivor()) {
+    _g1_rem->par_write_ref(from, p, tid);
+  }
+}
+
+template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
+  if (G1DeferredRSUpdate) {
+    deferred_rs_update(from, p, tid);
+  } else {
+    immediate_rs_update(from, p, tid);
+  }
+}
+
+template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
+  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+         "Reference should not be NULL here as such are never pushed to the task queue.");
+  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+
+  // Although we never intentionally push references outside of the collection
+  // set, due to (benign) races in the claim mechanism during RSet scanning more
+  // than one thread might claim the same card, so the same card may be
+  // processed multiple times. Hence, redo this check here.
+  if (_g1h->in_cset_fast_test(obj)) {
+    oop forwardee;
+    if (obj->is_forwarded()) {
+      forwardee = obj->forwardee();
+    } else {
+      forwardee = copy_to_survivor_space(obj);
+    }
+    assert(forwardee != NULL, "forwardee should not be NULL");
+    oopDesc::encode_store_heap_oop(p, forwardee);
+  }
+
+  assert(obj != NULL, "Must be");
+  update_rs(from, p, queue_num());
+}
+
+inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
+  assert(has_partial_array_mask(p), "invariant");
+  oop from_obj = clear_partial_array_mask(p);
+
+  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
+  assert(from_obj->is_objArray(), "must be obj array");
+  objArrayOop from_obj_array = objArrayOop(from_obj);
+  // The from-space object contains the real length.
+  int length                 = from_obj_array->length();
+
+  assert(from_obj->is_forwarded(), "must be forwarded");
+  oop to_obj                 = from_obj->forwardee();
+  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
+  objArrayOop to_obj_array   = objArrayOop(to_obj);
+  // We keep track of the next start index in the length field of the
+  // to-space object.
+  int next_index             = to_obj_array->length();
+  assert(0 <= next_index && next_index < length,
+         err_msg("invariant, next index: %d, length: %d", next_index, length));
+
+  int start                  = next_index;
+  int end                    = length;
+  int remainder              = end - start;
+  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
+  if (remainder > 2 * ParGCArrayScanChunk) {
+    end = start + ParGCArrayScanChunk;
+    to_obj_array->set_length(end);
+    // Push the remainder before we process the range in case another
+    // worker has run out of things to do and can steal it.
+    oop* from_obj_p = set_partial_array_mask(from_obj);
+    push_on_queue(from_obj_p);
+  } else {
+    assert(length == end, "sanity");
+    // We'll process the final range for this object. Restore the length
+    // so that the heap remains parsable in case of evacuation failure.
+    to_obj_array->set_length(end);
+  }
+  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
+  // Process indexes [start,end). It will also process the header
+  // along with the first chunk (i.e., the chunk with start == 0).
+  // Note that at this point the length field of to_obj_array is not
+  // correct given that we are using it to keep track of the next
+  // start index. oop_iterate_range() (thankfully!) ignores the length
+  // field and only relies on the start / end parameters.  It does
+  // however return the size of the object, which will be incorrect here,
+  // so its return value has to be ignored.
+  to_obj_array->oop_iterate_range(&_scanner, start, end);
+}
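// Aside: the chunking policy above in isolation (illustrative, with a plain
// vector standing in for the work queue). One chunk is scanned per visit; the
// remainder is re-pushed only while it is still large enough to be worth
// stealing, i.e. more than twice the chunk size.
#include <vector>

inline void scan_array_chunked(int next_index, int length, int chunk,
                               std::vector<int>& pending,  // next_index values
                               void (*scan_range)(int start, int end)) {
  int end = length;
  if (length - next_index > 2 * chunk) {
    end = next_index + chunk;
    pending.push_back(end);  // publish the remainder before scanning our part
  }
  scan_range(next_index, end);
}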
+
+template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
+  if (!has_partial_array_mask(ref_to_scan)) {
+    // Note: we can use the "raw" version of "region_containing" because
+    // "ref_to_scan" definitely points into the heap, and not into a
+    // humongous region.
+    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+    do_oop_evac(ref_to_scan, r);
+  } else {
+    do_oop_partial_array((oop*)ref_to_scan);
+  }
+}
+
+inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
+  assert(verify_task(ref), "sanity");
+  if (ref.is_narrow()) {
+    deal_with_reference((narrowOop*)ref);
+  } else {
+    deal_with_reference((oop*)ref);
+  }
+}
+
+void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
+  StarTask stolen_task;
+  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+    assert(verify_task(stolen_task), "sanity");
+    dispatch_reference(stolen_task);
+
+    // We've just processed a reference and we might have made
+    // new entries available on the queues. So we have to make sure
+    // we drain the queues as necessary.
+    trim_queue();
+  }
+}
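// Aside: the interplay of stealing and trimming, sketched against an assumed
// steal() primitive and an assumed Queues::Task typedef (none of these names
// are HotSpot's). Each stolen task may fan out into new local work, so the
// local queue is drained after every steal before stealing again.
template <typename Queues, typename ProcessAndTrim>
void steal_and_trim(Queues& queues, int my_id, ProcessAndTrim process_and_trim) {
  typename Queues::Task task;
  while (queues.steal(my_id, task)) {  // assumed: false once all queues are empty
    process_and_trim(task);            // process the task, then drain our queue
  }
}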
+
+#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
+
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1REMSET_INLINE_HPP
 
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "oops/oop.inline.hpp"
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -66,6 +66,17 @@
   }
 }
 
+void G1SATBCardTableModRefBS::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
+  if (!dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
+void G1SATBCardTableModRefBS::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
+  if (!dest_uninitialized) {
+    write_ref_array_pre_work(dst, count);
+  }
+}
+
 bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
   jbyte val = _byte_map[card_index];
   // It's already processed
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -86,16 +86,8 @@
   }
 
   template <class T> void write_ref_array_pre_work(T* dst, int count);
-  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
-    if (!dest_uninitialized) {
-      write_ref_array_pre_work(dst, count);
-    }
-  }
-  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
-    if (!dest_uninitialized) {
-      write_ref_array_pre_work(dst, count);
-    }
-  }
+  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
+  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
 
 /*
    Claimed and deferred bits are used together in G1 during the evacuation
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -30,6 +30,7 @@
 #include "gc_implementation/g1/heapRegion.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
+#include "gc_implementation/shared/liveRange.hpp"
 #include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.hpp"
 #include "memory/space.inline.hpp"
@@ -61,7 +62,7 @@
                                HeapRegion* hr,
                                HeapWord* cur, HeapWord* top) {
   oop cur_oop = oop(cur);
-  int oop_size = cur_oop->size();
+  size_t oop_size = hr->block_size(cur);
   HeapWord* next_obj = cur + oop_size;
   while (next_obj < top) {
     // Keep filtering the remembered set.
@@ -72,7 +73,7 @@
     }
     cur = next_obj;
     cur_oop = oop(cur);
-    oop_size = cur_oop->size();
+    oop_size = hr->block_size(cur);
     next_obj = cur + oop_size;
   }
   return cur;
@@ -82,7 +83,7 @@
                                       HeapWord* bottom,
                                       HeapWord* top) {
   G1CollectedHeap* g1h = _g1;
-  int oop_size;
+  size_t oop_size;
   ExtendedOopClosure* cl2 = NULL;
 
   FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
@@ -102,7 +103,7 @@
   if (!g1h->is_obj_dead(oop(bottom), _hr)) {
     oop_size = oop(bottom)->oop_iterate(cl2, mr);
   } else {
-    oop_size = oop(bottom)->size();
+    oop_size = _hr->block_size(bottom);
   }
 
   bottom += oop_size;
@@ -374,7 +375,7 @@
   // region.
   hr_clear(false /*par*/, false /*clear_space*/);
   set_top(bottom());
-  set_saved_mark();
+  record_top_and_timestamp();
 
   assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
 }
@@ -394,32 +395,6 @@
   return NULL;
 }
 
-void HeapRegion::save_marks() {
-  set_saved_mark();
-}
-
-void HeapRegion::oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl) {
-  HeapWord* p = mr.start();
-  HeapWord* e = mr.end();
-  oop obj;
-  while (p < e) {
-    obj = oop(p);
-    p += obj->oop_iterate(cl);
-  }
-  assert(p == e, "bad memregion: doesn't end on obj boundary");
-}
-
-#define HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
-void HeapRegion::oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
-  ContiguousSpace::oop_since_save_marks_iterate##nv_suffix(cl);              \
-}
-SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DEFN)
-
-
-void HeapRegion::oop_before_save_marks_iterate(ExtendedOopClosure* cl) {
-  oops_in_mr_iterate(MemRegion(bottom(), saved_mark_word()), cl);
-}
-
 void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                     bool during_conc_mark) {
   // We always recreate the prev marking info and we'll explicitly
@@ -476,7 +451,7 @@
     } else if (!g1h->is_obj_dead(obj)) {
       cl->do_object(obj);
     }
-    cur += obj->size();
+    cur += block_size(cur);
   }
   return NULL;
 }
@@ -548,7 +523,7 @@
       return cur;
     }
     // Otherwise...
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
   }
 
   // If we finish the above loop...We have a parseable object that
@@ -556,10 +531,9 @@
   // inside or spans the entire region.
 
   assert(obj == oop(cur), "sanity");
-  assert(cur <= start &&
-         obj->klass_or_null() != NULL &&
-         (cur + obj->size()) > start,
-         "Loop postcondition");
+  assert(cur <= start, "Loop postcondition");
+  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+  assert((cur + block_size(cur)) > start, "Loop postcondition");
 
   if (!g1h->is_obj_dead(obj)) {
     obj->oop_iterate(cl, mr);
@@ -573,7 +547,7 @@
     };
 
     // Otherwise:
-    next = (cur + obj->size());
+    next = cur + block_size(cur);
 
     if (!g1h->is_obj_dead(obj)) {
       if (next < end || !obj->is_objArray()) {
@@ -928,7 +902,7 @@
   size_t object_num = 0;
   while (p < top()) {
     oop obj = oop(p);
-    size_t obj_size = obj->size();
+    size_t obj_size = block_size(p);
     object_num += 1;
 
     if (is_humongous != g1->isHumongous(obj_size)) {
@@ -1064,7 +1038,9 @@
 // away eventually.
 
 void G1OffsetTableContigSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
+  set_top(bottom());
+  set_saved_mark_word(bottom());
+  CompactibleSpace::clear(mangle_space);
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
@@ -1102,10 +1078,10 @@
   if (_gc_time_stamp < g1h->get_gc_time_stamp())
     return top();
   else
-    return ContiguousSpace::saved_mark_word();
+    return Space::saved_mark_word();
 }
 
-void G1OffsetTableContigSpace::set_saved_mark() {
+void G1OffsetTableContigSpace::record_top_and_timestamp() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
 
@@ -1117,7 +1093,7 @@
     // of region. If it does so after _gc_time_stamp = ..., then it
     // will pick up the right saved_mark_word() as the high water mark
     // of the region. Either way, the behavior will be correct.
-    ContiguousSpace::set_saved_mark();
+    Space::set_saved_mark_word(top());
     OrderAccess::storestore();
     _gc_time_stamp = curr_gc_time_stamp;
     // No need to do another barrier to flush the writes above. If
@@ -1128,6 +1104,26 @@
   }
 }
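// Aside: the ordering requirement above (publish the saved mark before the
// time stamp) expressed with C++11 atomics (illustrative only): a release
// store of the stamp plays the role of the storestore barrier, paired with
// an acquire load on the reader side.
#include <atomic>

struct StampedMark {
  void* saved_mark;                         // ordered by the release store below
  std::atomic<unsigned> gc_time_stamp{0};

  void record(void* top, unsigned now) {
    if (gc_time_stamp.load(std::memory_order_acquire) < now) {
      saved_mark = top;
      gc_time_stamp.store(now, std::memory_order_release);
    }
  }
  void* read(unsigned now) {
    // A reader that observes the current stamp is guaranteed to see the mark.
    return gc_time_stamp.load(std::memory_order_acquire) == now ? saved_mark
                                                                : nullptr;
  }
};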
 
+void G1OffsetTableContigSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
+void G1OffsetTableContigSpace::object_iterate(ObjectClosure* blk) {
+  HeapWord* p = bottom();
+  while (p < top()) {
+    if (block_is_obj(p)) {
+      blk->do_object(oop(p));
+    }
+    p += block_size(p);
+  }
+}
+
+#define block_is_always_obj(q) true
+void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
+  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
+}
+#undef block_is_always_obj
+
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
@@ -1137,7 +1133,8 @@
 {
   _offsets.set_space(this);
   // false ==> we'll do the clearing if there's clearing to be done.
-  ContiguousSpace::initialize(mr, false, SpaceDecorator::Mangle);
+  CompactibleSpace::initialize(mr, false, SpaceDecorator::Mangle);
+  _top = bottom();
   _offsets.zero_bottom_entry();
   _offsets.initialize_threshold();
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -46,8 +46,6 @@
 // The solution is to remove this method from the definition
 // of a Space.
 
-class CompactibleSpace;
-class ContiguousSpace;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
@@ -125,9 +123,9 @@
 // the regions anyway) and at the end of a Full GC. The current scheme
 // that uses sequential unsigned ints will fail only if we have 4b
 // evacuation pauses between two cleanups, which is _highly_ unlikely.
-
-class G1OffsetTableContigSpace: public ContiguousSpace {
+class G1OffsetTableContigSpace: public CompactibleSpace {
   friend class VMStructs;
+  HeapWord* _top;
  protected:
   G1BlockOffsetArrayContigSpace _offsets;
   Mutex _par_alloc_lock;
@@ -144,11 +142,32 @@
   G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                            MemRegion mr);
 
+  void set_top(HeapWord* value) { _top = value; }
+  HeapWord* top() const { return _top; }
+
+ protected:
+  HeapWord** top_addr() { return &_top; }
+  // Allocation helpers (return NULL if full).
+  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
+  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+
+ public:
+  void reset_after_compaction() { set_top(compaction_top()); }
+
+  size_t used() const { return byte_size(bottom(), top()); }
+  size_t free() const { return byte_size(top(), end()); }
+  bool is_free_block(const HeapWord* p) const { return p >= top(); }
+
+  MemRegion used_region() const { return MemRegion(bottom(), top()); }
+
+  void object_iterate(ObjectClosure* blk);
+  void safe_object_iterate(ObjectClosure* blk);
+
   void set_bottom(HeapWord* value);
   void set_end(HeapWord* value);
 
   virtual HeapWord* saved_mark_word() const;
-  virtual void set_saved_mark();
+  void record_top_and_timestamp();
   void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   unsigned get_gc_time_stamp() { return _gc_time_stamp; }
 
@@ -168,6 +187,8 @@
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
+  void prepare_for_compaction(CompactPoint* cp);
+
   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -202,10 +223,6 @@
     ContinuesHumongous
   };
 
-  // Requires that the region "mr" be dense with objects, and begin and end
-  // with an object.
-  void oops_in_mr_iterate(MemRegion mr, ExtendedOopClosure* cl);
-
   // The remembered set for this region.
   // (Might want to make this "inline" later, to avoid some alloc failure
   // issues.)
@@ -353,14 +370,15 @@
     ParMarkRootClaimValue      = 9
   };
 
-  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::par_allocate(word_size);
-  }
-  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
-    assert(is_young(), "we can only skip BOT updates on young regions");
-    return ContiguousSpace::allocate(word_size);
-  }
+  // All allocated blocks are occupied by objects in a HeapRegion
+  bool block_is_obj(const HeapWord* p) const;
+
+  // Returns the object size for all valid block starts
+  // and the amount of unallocated words if called on top()
+  size_t block_size(const HeapWord* p) const;
+
+  inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
+  inline HeapWord* allocate_no_bot_updates(size_t word_size);
 
   // If this region is a member of a HeapRegionSeq, the index in that
   // sequence, otherwise -1.
@@ -569,9 +587,6 @@
 
   HeapWord* orig_end() { return _orig_end; }
 
-  // Allows logical separation between objects allocated before and after.
-  void save_marks();
-
   // Reset HR stuff to default values.
   void hr_clear(bool par, bool clear_space, bool locked = false);
   void par_clear();
@@ -580,10 +595,6 @@
   HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
   HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
 
-  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
-  // allocated in the current region before the last call to "save_mark".
-  void oop_before_save_marks_iterate(ExtendedOopClosure* cl);
-
   // Note the start or end of marking. This tells the heap region
   // that the collector is about to start or has finished (concurrently)
   // marking the heap.
@@ -769,10 +780,6 @@
     _predicted_bytes_to_copy = bytes;
   }
 
-#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
-  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
-  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
-
   virtual CompactibleSpace* next_compaction_space() const;
 
   virtual void reset_after_compaction();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.inline.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -26,9 +26,48 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
 
 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/heapRegion.hpp"
+#include "memory/space.hpp"
+#include "runtime/atomic.inline.hpp"
+
+// This version requires locking.
+inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
+                                                HeapWord* const end_value) {
+  HeapWord* obj = top();
+  if (pointer_delta(end_value, obj) >= size) {
+    HeapWord* new_top = obj + size;
+    set_top(new_top);
+    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+    return obj;
+  } else {
+    return NULL;
+  }
+}
+
+// This version is lock-free.
+inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
+                                                    HeapWord* const end_value) {
+  do {
+    HeapWord* obj = top();
+    if (pointer_delta(end_value, obj) >= size) {
+      HeapWord* new_top = obj + size;
+      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
+      // result is one of two values:
+      //  the old top value: the exchange succeeded;
+      //  otherwise: the current top, as updated by a competing thread,
+      //  is returned and we retry the loop.
+      if (result == obj) {
+        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
+        return obj;
+      }
+    } else {
+      return NULL;
+    }
+  } while (true);
+}
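// Aside: the lock-free loop above is classic bump-pointer allocation over a
// CAS. A standalone byte-granularity sketch with std::atomic (illustrative;
// alignment and HeapWord granularity elided):
#include <atomic>
#include <cstddef>

struct BumpSpace {
  std::atomic<char*> top;
  char* end;

  char* par_allocate(size_t bytes) {
    char* obj = top.load(std::memory_order_relaxed);
    do {
      if ((size_t)(end - obj) < bytes) {
        return nullptr;                    // not enough room left
      }
      // On failure compare_exchange_weak reloads 'obj' with the current top.
    } while (!top.compare_exchange_weak(obj, obj + bytes));
    return obj;
  }
};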
 
 inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
-  HeapWord* res = ContiguousSpace::allocate(size);
+  HeapWord* res = allocate_impl(size, end());
   if (res != NULL) {
     _offsets.alloc_block(res, size);
   }
@@ -40,12 +79,7 @@
 // this is used for larger LAB allocations only.
 inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
   MutexLocker x(&_par_alloc_lock);
-  // Given that we take the lock no need to use par_allocate() here.
-  HeapWord* res = ContiguousSpace::allocate(size);
-  if (res != NULL) {
-    _offsets.alloc_block(res, size);
-  }
-  return res;
+  return allocate(size);
 }
 
 inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
@@ -57,6 +91,32 @@
   return _offsets.block_start_const(p);
 }
 
+inline bool
+HeapRegion::block_is_obj(const HeapWord* p) const {
+  return p < top();
+}
+
+inline size_t
+HeapRegion::block_size(const HeapWord *addr) const {
+  const HeapWord* current_top = top();
+  if (addr < current_top) {
+    return oop(addr)->size();
+  } else {
+    assert(addr == current_top, "just checking");
+    return pointer_delta(end(), addr);
+  }
+}
+
+inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return par_allocate_impl(word_size, end());
+}
+
+inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
+  assert(is_young(), "we can only skip BOT updates on young regions");
+  return allocate_impl(word_size, end());
+}
+
 inline void HeapRegion::note_start_of_marking() {
   _next_marked_bytes = 0;
   _next_top_at_mark_start = top();
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -34,6 +34,8 @@
   static_field(HeapRegion, GrainBytes,        size_t)                         \
   static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
                                                                               \
+  nonstatic_field(G1OffsetTableContigSpace, _top,       HeapWord*)            \
+                                                                              \
   nonstatic_field(G1HeapRegionTable, _base,             address)              \
   nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
   nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
@@ -69,7 +71,8 @@
                                                                               \
   declare_type(G1CollectedHeap, SharedHeap)                                   \
                                                                               \
-  declare_type(HeapRegion, ContiguousSpace)                                   \
+  declare_type(G1OffsetTableContigSpace, CompactibleSpace)                    \
+  declare_type(HeapRegion, G1OffsetTableContigSpace)                          \
   declare_toplevel_type(HeapRegionSeq)                                        \
   declare_toplevel_type(HeapRegionSetBase)                                    \
   declare_toplevel_type(HeapRegionSetCount)                                   \
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,657 +0,0 @@
-/*
- * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#include "gc_implementation/parNew/asParNewGeneration.hpp"
-#include "gc_implementation/parNew/parNewGeneration.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
-#include "memory/defNewGeneration.inline.hpp"
-#include "memory/referencePolicy.hpp"
-#include "oops/markOop.inline.hpp"
-#include "oops/oop.pcgc.inline.hpp"
-
-ASParNewGeneration::ASParNewGeneration(ReservedSpace rs,
-                                       size_t initial_byte_size,
-                                       size_t min_byte_size,
-                                       int level) :
-  ParNewGeneration(rs, initial_byte_size, level),
-  _min_gen_size(min_byte_size) {}
-
-const char* ASParNewGeneration::name() const {
-  return "adaptive size par new generation";
-}
-
-void ASParNewGeneration::adjust_desired_tenuring_threshold() {
-  assert(UseAdaptiveSizePolicy,
-    "Should only be used with UseAdaptiveSizePolicy");
-}
-
-void ASParNewGeneration::resize(size_t eden_size, size_t survivor_size) {
-  // Resize the generation if needed. If the generation resize
-  // reports false, do not attempt to resize the spaces.
-  if (resize_generation(eden_size, survivor_size)) {
-    // Then we lay out the spaces inside the generation
-    resize_spaces(eden_size, survivor_size);
-
-    space_invariants();
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("Young generation size: "
-        "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT
-        " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT
-        " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT,
-        eden_size, survivor_size, used(), capacity(),
-        max_gen_size(), min_gen_size());
-    }
-  }
-}
-
-size_t ASParNewGeneration::available_to_min_gen() {
-  assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant");
-  return virtual_space()->committed_size() - min_gen_size();
-}
-
-// This method assumes that from-space has live data and that
-// any shrinkage of the young gen is limited by location of
-// from-space.
-size_t ASParNewGeneration::available_to_live() const {
-#undef SHRINKS_AT_END_OF_EDEN
-#ifdef SHRINKS_AT_END_OF_EDEN
-  size_t delta_in_survivor = 0;
-  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
-  const size_t space_alignment = heap->intra_heap_alignment();
-  const size_t gen_alignment = heap->object_heap_alignment();
-
-  MutableSpace* space_shrinking = NULL;
-  if (from_space()->end() > to_space()->end()) {
-    space_shrinking = from_space();
-  } else {
-    space_shrinking = to_space();
-  }
-
-  // Include any space that is committed but not included in
-  // the survivor spaces.
-  assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(),
-    "Survivor space beyond high end");
-  size_t unused_committed = pointer_delta(virtual_space()->high(),
-    space_shrinking->end(), sizeof(char));
-
-  if (space_shrinking->is_empty()) {
-    // Don't let the space shrink to 0
-    assert(space_shrinking->capacity_in_bytes() >= space_alignment,
-      "Space is too small");
-    delta_in_survivor = space_shrinking->capacity_in_bytes() - space_alignment;
-  } else {
-    delta_in_survivor = pointer_delta(space_shrinking->end(),
-                                      space_shrinking->top(),
-                                      sizeof(char));
-  }
-
-  size_t delta_in_bytes = unused_committed + delta_in_survivor;
-  delta_in_bytes = align_size_down(delta_in_bytes, gen_alignment);
-  return delta_in_bytes;
-#else
-  // The only space available for shrinking is in to-space if it
-  // is above from-space.
-  if (to()->bottom() > from()->bottom()) {
-    const size_t alignment = os::vm_page_size();
-    if (to()->capacity() < alignment) {
-      return 0;
-    } else {
-      return to()->capacity() - alignment;
-    }
-  } else {
-    return 0;
-  }
-#endif
-}
-
-// Return the number of bytes available for resizing down the young
-// generation.  This is the minimum of
-//      input "bytes"
-//      bytes to the minimum young gen size
-//      bytes to the size currently being used + some small extra
-size_t ASParNewGeneration::limit_gen_shrink (size_t bytes) {
-  // Allow shrinkage into the current eden but keep eden large enough
-  // to maintain the minimum young gen size
-  bytes = MIN3(bytes, available_to_min_gen(), available_to_live());
-  return align_size_down(bytes, os::vm_page_size());
-}
-
-// Note that the alignment used is the OS page size as
-// opposed to an alignment associated with the virtual space
-// (as is done in the ASPSYoungGen/ASPSOldGen)
-bool ASParNewGeneration::resize_generation(size_t eden_size,
-                                           size_t survivor_size) {
-  const size_t alignment = os::vm_page_size();
-  size_t orig_size = virtual_space()->committed_size();
-  bool size_changed = false;
-
-  // There used to be this guarantee there.
-  // guarantee ((eden_size + 2*survivor_size)  <= _max_gen_size, "incorrect input arguments");
-  // Code below forces this requirement.  In addition the desired eden
-  // size and desired survivor sizes are desired goals and may
-  // exceed the total generation size.
-
-  assert(min_gen_size() <= orig_size && orig_size <= max_gen_size(),
-    "just checking");
-
-  // Adjust new generation size
-  const size_t eden_plus_survivors =
-          align_size_up(eden_size + 2 * survivor_size, alignment);
-  size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_gen_size()),
-                             min_gen_size());
-  assert(desired_size <= max_gen_size(), "just checking");
-
-  if (desired_size > orig_size) {
-    // Grow the generation
-    size_t change = desired_size - orig_size;
-    assert(change % alignment == 0, "just checking");
-    if (expand(change)) {
-      return false; // Error if we fail to resize!
-    }
-    size_changed = true;
-  } else if (desired_size < orig_size) {
-    size_t desired_change = orig_size - desired_size;
-    assert(desired_change % alignment == 0, "just checking");
-
-    desired_change = limit_gen_shrink(desired_change);
-
-    if (desired_change > 0) {
-      virtual_space()->shrink_by(desired_change);
-      reset_survivors_after_shrink();
-
-      size_changed = true;
-    }
-  } else {
-    if (Verbose && PrintGC) {
-      if (orig_size == max_gen_size()) {
-        gclog_or_tty->print_cr("ASParNew generation size at maximum: "
-          SIZE_FORMAT "K", orig_size/K);
-      } else if (orig_size == min_gen_size()) {
-        gclog_or_tty->print_cr("ASParNew generation size at minium: "
-          SIZE_FORMAT "K", orig_size/K);
-      }
-    }
-  }
-
-  if (size_changed) {
-    MemRegion cmr((HeapWord*)virtual_space()->low(),
-                  (HeapWord*)virtual_space()->high());
-    GenCollectedHeap::heap()->barrier_set()->resize_covered_region(cmr);
-
-    if (Verbose && PrintGC) {
-      size_t current_size  = virtual_space()->committed_size();
-      gclog_or_tty->print_cr("ASParNew generation size changed: "
-                             SIZE_FORMAT "K->" SIZE_FORMAT "K",
-                             orig_size/K, current_size/K);
-    }
-  }
-
-  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
-            virtual_space()->committed_size() == max_gen_size(), "Sanity");
-
-  return true;
-}
-
-void ASParNewGeneration::reset_survivors_after_shrink() {
-
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  HeapWord* new_end = (HeapWord*)virtual_space()->high();
-
-  if (from()->end() > to()->end()) {
-    assert(new_end >= from()->end(), "Shrinking past from-space");
-  } else {
-    assert(new_end >= to()->bottom(), "Shrink was too large");
-    // Was there a shrink of the survivor space?
-    if (new_end < to()->end()) {
-      MemRegion mr(to()->bottom(), new_end);
-      to()->initialize(mr,
-                       SpaceDecorator::DontClear,
-                       SpaceDecorator::DontMangle);
-    }
-  }
-}
-void ASParNewGeneration::resize_spaces(size_t requested_eden_size,
-                                       size_t requested_survivor_size) {
-  assert(UseAdaptiveSizePolicy, "sanity check");
-  assert(requested_eden_size > 0  && requested_survivor_size > 0,
-         "just checking");
-  CollectedHeap* heap = Universe::heap();
-  assert(heap->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
-
-
-  // We require eden and to space to be empty
-  if ((!eden()->is_empty()) || (!to()->is_empty())) {
-    return;
-  }
-
-  size_t cur_eden_size = eden()->capacity();
-
-  if (PrintAdaptiveSizePolicy && Verbose) {
-    gclog_or_tty->print_cr("ASParNew::resize_spaces(requested_eden_size: "
-                  SIZE_FORMAT
-                  ", requested_survivor_size: " SIZE_FORMAT ")",
-                  requested_eden_size, requested_survivor_size);
-    gclog_or_tty->print_cr("    eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(eden()->bottom()),
-                  p2i(eden()->end()),
-                  pointer_delta(eden()->end(),
-                                eden()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("    from: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(from()->bottom()),
-                  p2i(from()->end()),
-                  pointer_delta(from()->end(),
-                                from()->bottom(),
-                                sizeof(char)));
-    gclog_or_tty->print_cr("      to: [" PTR_FORMAT ".." PTR_FORMAT ") "
-                  SIZE_FORMAT,
-                  p2i(to()->bottom()),
-                  p2i(to()->end()),
-                  pointer_delta(  to()->end(),
-                                  to()->bottom(),
-                                  sizeof(char)));
-  }
-
-  // There's nothing to do if the new sizes are the same as the current
-  if (requested_survivor_size == to()->capacity() &&
-      requested_survivor_size == from()->capacity() &&
-      requested_eden_size == eden()->capacity()) {
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    capacities are the right sizes, returning");
-    }
-    return;
-  }
-
-  char* eden_start = (char*)eden()->bottom();
-  char* eden_end   = (char*)eden()->end();
-  char* from_start = (char*)from()->bottom();
-  char* from_end   = (char*)from()->end();
-  char* to_start   = (char*)to()->bottom();
-  char* to_end     = (char*)to()->end();
-
-  const size_t alignment = os::vm_page_size();
-  const bool maintain_minimum =
-    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();
-
-  // Check whether from space is below to space
-  if (from_start < to_start) {
-    // Eden, from, to
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, from, to:");
-    }
-
-    // Set eden
-    // "requested_eden_size" is a goal for the size of eden
-    // and may not be attainable.  "eden_size" below is
-    // calculated based on the location of from-space and
-    // the goal for the size of eden.  from-space is
-    // fixed in place because it contains live data.
-    // The calculation is done this way to avoid 32bit
-    // overflow (i.e., eden_start + requested_eden_size
-    // may be too large for representation in 32 bits).
-    size_t eden_size;
-    if (maintain_minimum) {
-      // Only make eden larger than the requested size if
-      // the minimum size of the generation has to be maintained.
-      // This could be done in general but policy at a higher
-      // level is determining a requested size for eden and that
-      // should be honored unless there is a fundamental reason.
-      eden_size = pointer_delta(from_start,
-                                eden_start,
-                                sizeof(char));
-    } else {
-      eden_size = MIN2(requested_eden_size,
-                       pointer_delta(from_start, eden_start, sizeof(char)));
-    }
-
-    eden_size = align_size_down(eden_size, alignment);
-    eden_end = eden_start + eden_size;
-    assert(eden_end >= eden_start, "addition overflowed");
-
-    // To may resize into from space as long as it is clear of live data.
-    // From space must remain page aligned, though, so we need to do some
-    // extra calculations.
-
-    // First calculate an optimal to-space
-    to_end   = (char*)virtual_space()->high();
-    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
-                                    sizeof(char));
-
-    // Does the optimal to-space overlap from-space?
-    if (to_start < (char*)from()->end()) {
-      // Calculate the minimum offset possible for from_end
-      size_t from_size = pointer_delta(from()->top(), from_start, sizeof(char));
-
-      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
-      if (from_size == 0) {
-        from_size = alignment;
-      } else {
-        from_size = align_size_up(from_size, alignment);
-      }
-
-      from_end = from_start + from_size;
-      assert(from_end > from_start, "addition overflow or from_size problem");
-
-      guarantee(from_end <= (char*)from()->end(), "from_end moved to the right");
-
-      // Now update to_start with the new from_end
-      to_start = MAX2(from_end, to_start);
-    } else {
-      // If shrinking, move to-space down to abut the end of from-space
-      // so that shrinking will move to-space down.  If not shrinking
-      // to-space is moving up to allow for growth on the next expansion.
-      if (requested_eden_size <= cur_eden_size) {
-        to_start = from_end;
-        if (to_start + requested_survivor_size > to_start) {
-          to_end = to_start + requested_survivor_size;
-        }
-      }
-      // else leave to_end pointing to the high end of the virtual space.
-    }
-
-    guarantee(to_start != to_end, "to space is zero sized");
-
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-    }
-  } else {
-    // Eden, to, from
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("  Eden, to, from:");
-    }
-
-    // Calculate the to-space boundaries based on
-    // the start of from-space.
-    to_end = from_start;
-    to_start = (char*)pointer_delta(from_start,
-                                    (char*)requested_survivor_size,
-                                    sizeof(char));
-    // Calculate the ideal eden boundaries.
-    // eden_end is already at the bottom of the generation
-    assert(eden_start == virtual_space()->low(),
-      "Eden is not starting at the low end of the virtual space");
-    if (eden_start + requested_eden_size >= eden_start) {
-      eden_end = eden_start + requested_eden_size;
-    } else {
-      eden_end = to_start;
-    }
-
-    // Does eden intrude into to-space?  to-space
-    // gets priority but eden is not allowed to shrink
-    // to 0.
-    if (eden_end > to_start) {
-      eden_end = to_start;
-    }
-
-    // Don't let eden shrink down to 0 or less.
-    eden_end = MAX2(eden_end, eden_start + alignment);
-    assert(eden_start + alignment >= eden_start, "Overflow");
-
-    size_t eden_size;
-    if (maintain_minimum) {
-      // Use all the space available.
-      eden_end = MAX2(eden_end, to_start);
-      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
-      eden_size = MIN2(eden_size, cur_eden_size);
-    } else {
-      eden_size = pointer_delta(eden_end, eden_start, sizeof(char));
-    }
-    eden_size = align_size_down(eden_size, alignment);
-    assert(maintain_minimum || eden_size <= requested_eden_size,
-      "Eden size is too large");
-    assert(eden_size >= alignment, "Eden size is too small");
-    eden_end = eden_start + eden_size;
-
-    // Move to-space down to eden.
-    if (requested_eden_size < cur_eden_size) {
-      to_start = eden_end;
-      if (to_start + requested_survivor_size > to_start) {
-        to_end = MIN2(from_start, to_start + requested_survivor_size);
-      } else {
-        to_end = from_start;
-      }
-    }
-
-    // eden_end may have moved so again make sure
-    // the to-space and eden don't overlap.
-    to_start = MAX2(eden_end, to_start);
-
-    // from-space
-    size_t from_used = from()->used();
-    if (requested_survivor_size > from_used) {
-      if (from_start + requested_survivor_size >= from_start) {
-        from_end = from_start + requested_survivor_size;
-      }
-      if (from_end > virtual_space()->high()) {
-        from_end = virtual_space()->high();
-      }
-    }
-
-    assert(to_start >= eden_end, "to-space should be above eden");
-    if (PrintAdaptiveSizePolicy && Verbose) {
-      gclog_or_tty->print_cr("    [eden_start .. eden_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(eden_start),
-                    p2i(eden_end),
-                    pointer_delta(eden_end, eden_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [  to_start ..   to_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(to_start),
-                    p2i(to_end),
-                    pointer_delta(  to_end,   to_start, sizeof(char)));
-      gclog_or_tty->print_cr("    [from_start .. from_end): "
-                    "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
-                    p2i(from_start),
-                    p2i(from_end),
-                    pointer_delta(from_end, from_start, sizeof(char)));
-    }
-  }
-
-
-  guarantee((HeapWord*)from_start <= from()->bottom(),
-            "from start moved to the right");
-  guarantee((HeapWord*)from_end >= from()->top(),
-            "from end moved into live data");
-  assert(is_object_aligned((intptr_t)eden_start), "checking alignment");
-  assert(is_object_aligned((intptr_t)from_start), "checking alignment");
-  assert(is_object_aligned((intptr_t)to_start), "checking alignment");
-
-  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
-  MemRegion toMR  ((HeapWord*)to_start,   (HeapWord*)to_end);
-  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);
-
-  // Let's make sure the call to initialize doesn't reset "top"!
-  HeapWord* old_from_top = from()->top();
-
-  // For the PrintAdaptiveSizePolicy block below.
-  size_t old_from = from()->capacity();
-  size_t old_to   = to()->capacity();
-
-  // If not clearing the spaces, do some checking to verify that
-  // the spaces are already mangled.
-
-  // Must check mangling before the spaces are reshaped.  Otherwise,
-  // the bottom or end of one space may have moved into another;
-  // a failure of the check might then not correctly indicate which
-  // space is improperly mangled.
-  if (ZapUnusedHeapArea) {
-    HeapWord* limit = (HeapWord*) virtual_space()->high();
-    eden()->check_mangled_unused_area(limit);
-    from()->check_mangled_unused_area(limit);
-      to()->check_mangled_unused_area(limit);
-  }
-
-  // The call to initialize NULL's the next compaction space
-  eden()->initialize(edenMR,
-                     SpaceDecorator::Clear,
-                     SpaceDecorator::DontMangle);
-  eden()->set_next_compaction_space(from());
-    to()->initialize(toMR  ,
-                     SpaceDecorator::Clear,
-                     SpaceDecorator::DontMangle);
-  from()->initialize(fromMR,
-                     SpaceDecorator::DontClear,
-                     SpaceDecorator::DontMangle);
-
-  assert(from()->top() == old_from_top, "from top changed!");
-
-  if (PrintAdaptiveSizePolicy) {
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    assert(gch->kind() == CollectedHeap::GenCollectedHeap, "Sanity");
-
-    gclog_or_tty->print("AdaptiveSizePolicy::survivor space sizes: "
-                  "collection: %d "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
-                  "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
-                  gch->total_collections(),
-                  old_from, old_to,
-                  from()->capacity(),
-                  to()->capacity());
-    gclog_or_tty->cr();
-  }
-}
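
The resize logic above leans on pointer_delta() and align_size_down() to keep
space boundaries page aligned. A minimal standalone sketch of that arithmetic,
assuming a power-of-two alignment; align_down and delta_in_bytes are
illustrative stand-ins, not the VM's own helpers:

#include <cassert>
#include <cstddef>
#include <cstdio>

// Round size down to the nearest multiple of alignment (a power of two).
static size_t align_down(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return size & ~(alignment - 1);
}

// Byte distance between two addresses, as pointer_delta(hi, lo, sizeof(char)).
static size_t delta_in_bytes(const char* hi, const char* lo) {
  return static_cast<size_t>(hi - lo);
}

int main() {
  char region[64 * 1024];
  char* eden_start = region;
  char* from_start = region + 10000;
  size_t eden_size = align_down(delta_in_bytes(from_start, eden_start), 4096);
  printf("aligned eden size: %zu\n", eden_size);  // prints 8192
  return 0;
}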
-
-void ASParNewGeneration::compute_new_size() {
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
-    "not a CMS generational heap");
-
-
-  CMSAdaptiveSizePolicy* size_policy =
-    (CMSAdaptiveSizePolicy*)gch->gen_policy()->size_policy();
-  assert(size_policy->is_gc_cms_adaptive_size_policy(),
-    "Wrong type of size policy");
-
-  size_t survived = from()->used();
-  if (!survivor_overflow()) {
-    // Keep running averages on how much survived
-    size_policy->avg_survived()->sample(survived);
-  } else {
-    size_t promoted =
-      (size_t) next_gen()->gc_stats()->avg_promoted()->last_sample();
-    assert(promoted < gch->capacity(), "Conversion problem?");
-    size_t survived_guess = survived + promoted;
-    size_policy->avg_survived()->sample(survived_guess);
-  }
-
-  size_t survivor_limit = max_survivor_size();
-  _tenuring_threshold =
-    size_policy->compute_survivor_space_size_and_threshold(
-                                                     _survivor_overflow,
-                                                     _tenuring_threshold,
-                                                     survivor_limit);
-  size_policy->avg_young_live()->sample(used());
-  size_policy->avg_eden_live()->sample(eden()->used());
-
-  size_policy->compute_eden_space_size(eden()->capacity(), max_gen_size());
-
-  resize(size_policy->calculated_eden_size_in_bytes(),
-         size_policy->calculated_survivor_size_in_bytes());
-
-  if (UsePerfData) {
-    CMSGCAdaptivePolicyCounters* counters =
-      (CMSGCAdaptivePolicyCounters*) gch->collector_policy()->counters();
-    assert(counters->kind() ==
-           GCPolicyCounters::CMSGCAdaptivePolicyCountersKind,
-      "Wrong kind of counters");
-    counters->update_tenuring_threshold(_tenuring_threshold);
-    counters->update_survivor_overflowed(_survivor_overflow);
-    counters->update_young_capacity(capacity());
-  }
-}
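
compute_new_size() feeds occupancy numbers into decaying averages
(avg_survived()->sample() and friends) so sizing decisions track recent
behavior rather than any single collection. A minimal sketch of such an
exponentially weighted average, patterned on HotSpot's AdaptiveWeightedAverage;
the class below is a simplified stand-in, not the VM type:

#include <cstdio>
#include <initializer_list>

class WeightedAverage {
  float _average;
  unsigned _weight;   // percent of each new sample folded into the average
 public:
  explicit WeightedAverage(unsigned weight) : _average(0.0f), _weight(weight) {}
  void sample(float value) {
    // Exponential decay: recent samples dominate, older ones fade out.
    _average = ((100 - _weight) * _average + _weight * value) / 100.0f;
  }
  float average() const { return _average; }
};

int main() {
  WeightedAverage avg_survived(25);  // 25% weight per sample
  for (float s : {512.0f, 768.0f, 640.0f}) {
    avg_survived.sample(s);
  }
  printf("smoothed survived size: %.1f\n", avg_survived.average());
  return 0;
}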
-
-
-#ifndef PRODUCT
-// Changes from PSYoungGen version
-//      value of "alignment"
-void ASParNewGeneration::space_invariants() {
-  const size_t alignment = os::vm_page_size();
-
-  // Currently, our eden size cannot shrink to zero
-  guarantee(eden()->capacity() >= alignment, "eden too small");
-  guarantee(from()->capacity() >= alignment, "from too small");
-  guarantee(to()->capacity() >= alignment, "to too small");
-
-  // Relationship of spaces to each other
-  char* eden_start = (char*)eden()->bottom();
-  char* eden_end   = (char*)eden()->end();
-  char* from_start = (char*)from()->bottom();
-  char* from_end   = (char*)from()->end();
-  char* to_start   = (char*)to()->bottom();
-  char* to_end     = (char*)to()->end();
-
-  guarantee(eden_start >= virtual_space()->low(), "eden bottom");
-  guarantee(eden_start < eden_end, "eden space consistency");
-  guarantee(from_start < from_end, "from space consistency");
-  guarantee(to_start < to_end, "to space consistency");
-
-  // Check whether from space is below to space
-  if (from_start < to_start) {
-    // Eden, from, to
-    guarantee(eden_end <= from_start, "eden/from boundary");
-    guarantee(from_end <= to_start,   "from/to boundary");
-    guarantee(to_end <= virtual_space()->high(), "to end");
-  } else {
-    // Eden, to, from
-    guarantee(eden_end <= to_start, "eden/to boundary");
-    guarantee(to_end <= from_start, "to/from boundary");
-    guarantee(from_end <= virtual_space()->high(), "from end");
-  }
-
-  // More checks that the virtual space is consistent with the spaces
-  assert(virtual_space()->committed_size() >=
-    (eden()->capacity() +
-     to()->capacity() +
-     from()->capacity()), "Committed size is inconsistent");
-  assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(),
-    "Space invariant");
-  char* eden_top = (char*)eden()->top();
-  char* from_top = (char*)from()->top();
-  char* to_top = (char*)to()->top();
-  assert(eden_top <= virtual_space()->high(), "eden top");
-  assert(from_top <= virtual_space()->high(), "from top");
-  assert(to_top <= virtual_space()->high(), "to top");
-}
-#endif
--- a/hotspot/src/share/vm/gc_implementation/parNew/asParNewGeneration.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,98 +0,0 @@
-/*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
-#define SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
-
-#include "gc_implementation/parNew/parNewGeneration.hpp"
-#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
-
-// A Generation that does parallel young-gen collection extended
-// for adaptive size policy.
-
-// Division of generation into spaces
-// done by DefNewGeneration::compute_space_boundaries()
-//      +---------------+
-//      | uncommitted   |
-//      |---------------|
-//      | ss0           |
-//      |---------------|
-//      | ss1           |
-//      |---------------|
-//      |               |
-//      | eden          |
-//      |               |
-//      +---------------+       <-- low end of VirtualSpace
-//
-class ASParNewGeneration: public ParNewGeneration {
-
-  size_t _min_gen_size;
-
-  // Resize the generation based on the desired sizes of
-  // the constituent spaces.
-  bool resize_generation(size_t eden_size, size_t survivor_size);
-  // Resize the spaces based on their desired sizes but
-  // respecting the maximum size of the generation.
-  void resize_spaces(size_t eden_size, size_t survivor_size);
-  // Return the byte size remaining to the minimum generation size.
-  size_t available_to_min_gen();
-  // Return the byte size remaining to the live data in the generation.
-  size_t available_to_live() const;
-  // Return the byte size that the generation is allowed to shrink.
-  size_t limit_gen_shrink(size_t bytes);
-  // Reset the size of the spaces after a shrink of the generation.
-  void reset_survivors_after_shrink();
-
-  // Accessor
-  VirtualSpace* virtual_space() { return &_virtual_space; }
-
-  virtual void adjust_desired_tenuring_threshold();
-
- public:
-
-  ASParNewGeneration(ReservedSpace rs,
-                     size_t initial_byte_size,
-                     size_t min_byte_size,
-                     int level);
-
-  virtual const char* short_name() const { return "ASParNew"; }
-  virtual const char* name() const;
-  virtual Generation::Name kind() { return ASParNew; }
-
-  // Change the sizes of eden and the survivor spaces in
-  // the generation.  The parameters are desired sizes
-  // and are not guaranteed to be met; for example, they
-  // cannot be met if their total is larger than the generation.
-  void resize(size_t eden_size, size_t survivor_size);
-
-  virtual void compute_new_size();
-
-  size_t max_gen_size()                 { return _reserved.byte_size(); }
-  size_t min_gen_size() const           { return _min_gen_size; }
-
-  // Space boundary invariant checker
-  void space_invariants() PRODUCT_RETURN;
-};
-
-#endif // SHARE_VM_GC_IMPLEMENTATION_PARNEW_ASPARNEWGENERATION_HPP
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -507,7 +507,7 @@
     // always fail (never do the print based on the interval test).
     return PrintGCDetails &&
            UseAdaptiveSizePolicy &&
-           (UseParallelGC || UseConcMarkSweepGC) &&
+           UseParallelGC &&
            (AdaptiveSizePolicyOutputInterval > 0) &&
            ((count == 0) ||
              ((count % AdaptiveSizePolicyOutputInterval) == 0));
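
The condition above gates periodic adaptive-size-policy output on a
collection-count interval (and, after this change, on UseParallelGC alone,
since the CMS variant is gone). A small sketch of the interval test in
isolation, with plain parameters standing in for the VM flags:

#include <cstdio>

// Print on the first collection and then on every interval-th collection.
static bool should_print(unsigned count, unsigned interval) {
  return (interval > 0) &&
         ((count == 0) || ((count % interval) == 0));
}

int main() {
  for (unsigned c = 0; c < 7; c++) {
    printf("collection %u: %s\n", c, should_print(c, 3) ? "print" : "skip");
  }
  return 0;
}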
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -216,16 +216,4 @@
   bool increment_will_decrease();
 };
 
-class GCPauseTimer : StackObj {
-  elapsedTimer* _timer;
- public:
-  GCPauseTimer(elapsedTimer* timer) {
-    _timer = timer;
-    _timer->stop();
-  }
-  ~GCPauseTimer() {
-    _timer->start();
-  }
-};
-
 #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCUTIL_HPP
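
The deleted GCPauseTimer was a StackObj-style RAII guard: constructing it
stopped an elapsedTimer, and leaving the scope restarted it. A self-contained
sketch of the same pattern, with a toy Timer standing in for elapsedTimer:

#include <cstdio>

class Timer {
  bool _running;
 public:
  Timer() : _running(true) {}
  void stop()  { _running = false; }
  void start() { _running = true; }
  bool running() const { return _running; }
};

// Stack object: pauses the timer on entry, resumes it on exit, even if the
// scope is left early via return or an exception.
class PauseTimer {
  Timer* _timer;
 public:
  explicit PauseTimer(Timer* t) : _timer(t) { _timer->stop(); }
  ~PauseTimer() { _timer->start(); }
};

int main() {
  Timer concurrent_timer;
  {
    PauseTimer pause(&concurrent_timer);
    printf("running inside pause: %d\n", concurrent_timer.running());  // 0
  }
  printf("running after pause:  %d\n", concurrent_timer.running());    // 1
  return 0;
}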
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -40,10 +40,6 @@
 #include "runtime/thread.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
-#include "gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp"
-#endif // INCLUDE_ALL_GCS
 
 // CollectorPolicy methods
 
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -115,7 +115,6 @@
     CollectorPolicyKind,
     GenCollectorPolicyKind,
     ConcurrentMarkSweepPolicyKind,
-    ASConcurrentMarkSweepPolicyKind,
     G1CollectorPolicyKind
   };
 
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -202,13 +202,11 @@
   guarantee(policy->is_generation_policy(), "Illegal policy type");
   DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
   assert(def_new_gen->kind() == Generation::DefNew ||
-         def_new_gen->kind() == Generation::ParNew ||
-         def_new_gen->kind() == Generation::ASParNew,
+         def_new_gen->kind() == Generation::ParNew,
          "Wrong generation kind");
 
   Generation* old_gen = get_gen(1);
   assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
-         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
          old_gen->kind() == Generation::MarkSweepCompact,
     "Wrong generation kind");
 
@@ -573,9 +571,6 @@
     }
   }
 
-  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
-  AdaptiveSizePolicyOutput(sp, total_collections());
-
   print_heap_after_gc();
 
 #ifdef TRACESPINNING
@@ -724,8 +719,7 @@
 #if INCLUDE_ALL_GCS
 bool GenCollectedHeap::create_cms_collector() {
 
-  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
-         (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
+  assert(_gens[1]->kind() == Generation::ConcurrentMarkSweep,
          "Unexpected generation kinds");
   // Skip two header words in the block content verification
   NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
--- a/hotspot/src/share/vm/memory/generation.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/generation.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -154,8 +154,7 @@
 
 DefNewGeneration* Generation::as_DefNewGeneration() {
   assert((kind() == Generation::DefNew) ||
-         (kind() == Generation::ParNew) ||
-         (kind() == Generation::ASParNew),
+         (kind() == Generation::ParNew),
     "Wrong youngest generation type");
   return (DefNewGeneration*) this;
 }
--- a/hotspot/src/share/vm/memory/generation.hpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/generation.hpp	Wed Jul 02 17:24:18 2014 -0700
@@ -131,8 +131,6 @@
  public:
   // The set of possible generation kinds.
   enum Name {
-    ASParNew,
-    ASConcurrentMarkSweep,
     DefNew,
     ParNew,
     MarkSweepCompact,
--- a/hotspot/src/share/vm/memory/generationSpec.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/generationSpec.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -32,7 +32,6 @@
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/parNew/asParNewGeneration.hpp"
 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #endif // INCLUDE_ALL_GCS
@@ -50,12 +49,6 @@
     case Generation::ParNew:
       return new ParNewGeneration(rs, init_size(), level);
 
-    case Generation::ASParNew:
-      return new ASParNewGeneration(rs,
-                                    init_size(),
-                                    init_size() /* min size */,
-                                    level);
-
     case Generation::ConcurrentMarkSweep: {
       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
       CardTableRS* ctrs = remset->as_CardTableRS();
@@ -75,26 +68,6 @@
 
       return g;
     }
-
-    case Generation::ASConcurrentMarkSweep: {
-      assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
-      CardTableRS* ctrs = remset->as_CardTableRS();
-      if (ctrs == NULL) {
-        vm_exit_during_initialization("Rem set incompatibility.");
-      }
-      // Otherwise
-      // The constructor creates the CMSCollector if needed,
-      // else registers with an existing CMSCollector
-
-      ASConcurrentMarkSweepGeneration* g = NULL;
-      g = new ASConcurrentMarkSweepGeneration(rs,
-                 init_size(), level, ctrs, UseCMSAdaptiveFreeLists,
-                 (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
-
-      g->initialize_performance_counters();
-
-      return g;
-    }
 #endif // INCLUDE_ALL_GCS
 
     default:
--- a/hotspot/src/share/vm/memory/space.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/space.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -685,14 +685,8 @@
 // This version requires locking.
 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                 HeapWord* const end_value) {
-  // In G1 there are places where a GC worker can allocate into a
-  // region using this serial allocation code without being prone to a
-  // race with other GC workers (we ensure that no other GC worker can
-  // access the same region at the same time). So the assert below is
-  // too strong in the case of G1.
   assert(Heap_lock->owned_by_self() ||
-         (SafepointSynchronize::is_at_safepoint() &&
-                               (Thread::current()->is_VM_thread() || UseG1GC)),
+         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
   if (pointer_delta(end_value, obj) >= size) {
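
Past the tightened assert, allocate_impl() is a plain bump-pointer allocation:
compare the remaining room against the request and advance top on success. A
minimal sketch of that fast path, using byte sizes instead of HeapWords; the
names are illustrative:

#include <cstddef>
#include <cstdio>

struct BumpSpace {
  char* _top;
  char* _end;
  void* allocate(size_t size) {
    if (static_cast<size_t>(_end - _top) >= size) {  // enough room left?
      void* obj = _top;
      _top += size;    // bump the top pointer past the new object
      return obj;
    }
    return nullptr;    // caller must fall back to a slower path
  }
};

int main() {
  char backing[1024];
  BumpSpace space = { backing, backing + sizeof(backing) };
  void* a = space.allocate(128);
  void* b = space.allocate(1024);  // no longer fits after the first allocation
  printf("first: %p, second: %p\n", a, b);
  return 0;
}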
--- a/hotspot/src/share/vm/memory/universe.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/memory/universe.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -72,7 +72,7 @@
 #include "utilities/preserveException.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
+#include "gc_implementation/shared/adaptiveSizePolicy.hpp"
 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
@@ -802,13 +802,9 @@
       gc_policy = new MarkSweepPolicy();
     } else if (UseConcMarkSweepGC) {
 #if INCLUDE_ALL_GCS
-      if (UseAdaptiveSizePolicy) {
-        gc_policy = new ASConcurrentMarkSweepPolicy();
-      } else {
-        gc_policy = new ConcurrentMarkSweepPolicy();
-      }
+      gc_policy = new ConcurrentMarkSweepPolicy();
 #else  // INCLUDE_ALL_GCS
-    fatal("UseConcMarkSweepGC not supported in this VM.");
+      fatal("UseConcMarkSweepGC not supported in this VM.");
 #endif // INCLUDE_ALL_GCS
     } else { // default old generation
       gc_policy = new MarkSweepPolicy();
--- a/hotspot/src/share/vm/services/memoryService.cpp	Wed Jul 02 13:52:52 2014 +0200
+++ b/hotspot/src/share/vm/services/memoryService.cpp	Wed Jul 02 17:24:18 2014 -0700
@@ -136,7 +136,6 @@
         break;
 #if INCLUDE_ALL_GCS
       case Generation::ParNew:
-      case Generation::ASParNew:
         _minor_gc_manager = MemoryManager::get_parnew_memory_manager();
         break;
 #endif // INCLUDE_ALL_GCS
@@ -268,7 +267,6 @@
 
 #if INCLUDE_ALL_GCS
     case Generation::ParNew:
-    case Generation::ASParNew:
     {
       assert(major_mgr != NULL && minor_mgr != NULL, "Should have two managers");
       // Add a memory pool for each space and young gen doesn't
@@ -300,7 +298,6 @@
 
 #if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
-    case Generation::ASConcurrentMarkSweep:
     {
       assert(major_mgr != NULL && minor_mgr == NULL, "Should have only one manager");
       ConcurrentMarkSweepGeneration* cms = (ConcurrentMarkSweepGeneration*) gen;
@@ -548,23 +545,20 @@
 // GC manager type depends on the type of Generation. Depending on space
 // availability and VM options, the GC uses the major GC manager, the minor
 // GC manager, or both; which one depends on the generation kind.
-// For DefNew, ParNew and ASParNew generation doing scavenge gc uses minor
-// gc manager (so _fullGC is set to false ) and for other generation kinds
-// doing mark-sweep-compact uses major gc manager (so _fullGC is set
-// to true).
+// For the DefNew and ParNew generations, a scavenge uses the minor GC manager
+// (so _fullGC is set to false), while other generation kinds doing
+// mark-sweep-compact use the major GC manager (so _fullGC is set to true).
 TraceMemoryManagerStats::TraceMemoryManagerStats(Generation::Name kind, GCCause::Cause cause) {
   switch (kind) {
     case Generation::DefNew:
 #if INCLUDE_ALL_GCS
     case Generation::ParNew:
-    case Generation::ASParNew:
 #endif // INCLUDE_ALL_GCS
       _fullGC=false;
       break;
     case Generation::MarkSweepCompact:
 #if INCLUDE_ALL_GCS
     case Generation::ConcurrentMarkSweep:
-    case Generation::ASConcurrentMarkSweep:
 #endif // INCLUDE_ALL_GCS
       _fullGC=true;
       break;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/g1/TestDeferredRSUpdate.java	Wed Jul 02 17:24:18 2014 -0700
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestDeferredRSUpdate
+ * @bug 8040977
+ * @summary Ensure that running with -XX:-G1DeferredRSUpdate does not crash the VM
+ * @key gc
+ * @library /testlibrary
+ */
+
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+public class TestDeferredRSUpdate {
+  public static void main(String[] args) throws Exception {
+    GCTest.main(args);
+
+    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC",
+                                                              "-Xmx10M",
+                                                              // G1DeferredRSUpdate is a develop option, but we cannot limit execution of this test to only debug VMs.
+                                                              "-XX:+IgnoreUnrecognizedVMOptions",
+                                                              "-XX:-G1DeferredRSUpdate",
+                                                              GCTest.class.getName());
+
+    OutputAnalyzer output = new OutputAnalyzer(pb.start());
+    output.shouldHaveExitValue(0);
+  }
+
+  static class GCTest {
+    private static Object[] garbage = new Object[32];
+
+    public static void main(String [] args) {
+      System.out.println("Creating garbage");
+      // Create 128MB of garbage. This should result in at least one minor GC, with
+      // some objects copied to the old gen. As references from old to young are
+      // installed, the use-before-initialization crash occurs.
+      Object prev = null;
+      Object prevPrev = null;
+      for (int i = 0; i < 1024; i++) {
+        Object[] next = new Object[32 * 1024];
+        next[0] = prev;
+        next[1] = prevPrev;
+
+        Object[] cur = (Object[]) garbage[i % garbage.length];
+        if (cur != null) {
+          cur[0] = null;
+          cur[1] = null;
+        }
+        garbage[i % garbage.length] = next;
+
+        prevPrev = prev;
+        prev = next;
+      }
+      System.out.println("Done");
+    }
+  }
+}