Merge
author amurillo
Thu, 22 Oct 2015 16:25:34 -0700
changeset 33231 a6d6dd711998
parent 33202 643e2fbeccc3 (current diff)
parent 33230 23bb11a5cf4e (diff)
child 33232 75e0112d3eb5
hotspot/src/share/vm/gc/shared/genRemSet.cpp
hotspot/src/share/vm/gc/shared/genRemSet.hpp
hotspot/src/share/vm/gc/shared/watermark.hpp
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Thu Oct 22 16:25:34 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,21 +52,19 @@
   }
 
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
-    Type type                  = db.lookupType("Method");
+    type                       = db.lookupType("Method");
     constMethod                = type.getAddressField("_constMethod");
     methodData                 = type.getAddressField("_method_data");
     methodCounters             = type.getAddressField("_method_counters");
-    methodSize                 = new CIntField(type.getCIntegerField("_method_size"), 0);
     accessFlags                = new CIntField(type.getCIntegerField("_access_flags"), 0);
     code                       = type.getAddressField("_code");
     vtableIndex                = new CIntField(type.getCIntegerField("_vtable_index"), 0);
-    bytecodeOffset = type.getSize();
 
     /*
-    interpreterEntry           = type.getAddressField("_interpreter_entry");
     fromCompiledCodeEntryPoint = type.getAddressField("_from_compiled_code_entry_point");
+    interpreterEntry           = type.getAddressField("_from_interpreted_entry");
+    */
 
-    */
     objectInitializerName = null;
     classInitializerName = null;
   }
@@ -77,16 +75,22 @@
 
   public boolean isMethod()            { return true; }
 
+  // Not a Method field, used to keep type.
+  private static Type type;
+
   // Fields
   private static AddressField  constMethod;
   private static AddressField  methodData;
   private static AddressField  methodCounters;
-  private static CIntField methodSize;
   private static CIntField accessFlags;
   private static CIntField vtableIndex;
-  private static long      bytecodeOffset;
 
   private static AddressField       code;
+  /*
+  private static AddressCField      fromCompiledCodeEntryPoint;
+  private static AddressField       interpreterEntry;
+  */
+
 
   // constant method names - <init>, <clinit>
   // Initialized lazily to avoid initialization ordering dependencies between Method and SymbolTable
@@ -106,11 +110,6 @@
   }
 
 
-  /*
-  private static AddressCField       interpreterEntry;
-  private static AddressCField       fromCompiledCodeEntryPoint;
-  */
-
   // Accessors for declared fields
   public ConstMethod  getConstMethod()                {
     Address addr = constMethod.getValue(getAddress());
@@ -128,7 +127,6 @@
     return (MethodCounters) VMObjectFactory.newObject(MethodCounters.class, addr);
   }
   /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
-  public long         getMethodSize()                 { return                methodSize.getValue(this);        }
   public long         getMaxStack()                   { return                getConstMethod().getMaxStack();   }
   public long         getMaxLocals()                  { return                getConstMethod().getMaxLocals();         }
   public long         getSizeOfParameters()           { return                getConstMethod().getSizeOfParameters();  }
@@ -265,7 +263,7 @@
   }
 
   public long getSize() {
-    return getMethodSize();
+    return type.getSize() + (isNative() ? 2: 0);
   }
 
   public void printValueOn(PrintStream tty) {
@@ -273,7 +271,6 @@
   }
 
   public void iterateFields(MetadataVisitor visitor) {
-      visitor.doCInt(methodSize, true);
       visitor.doCInt(accessFlags, true);
     }
 
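The new getSize() derives a Method's size from its type instead of the removed _method_size field, adding two slots when the method is native. A minimal sketch of the arithmetic, under the assumption (taken from the VM side, not stated in this diff) that the two extra word-sized slots are the native function pointer and the signature handler appended to native Methods:

    #include <cstdio>

    // Illustrative only, not SA or VM code.
    static long method_size(long type_size, bool is_native) {
      // +2: assumed slots for the native entry and the signature handler
      return type_size + (is_native ? 2 : 0);
    }

    int main() {
      printf("plain:  %ld\n", method_size(11, false)); // 11
      printf("native: %ld\n", method_size(11, true));  // 13
      return 0;
    }
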
--- a/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/cpu/aarch64/vm/globals_aarch64.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -55,10 +55,17 @@
 define_pd_global(intx, OptoLoopAlignment,        16);
 define_pd_global(intx, InlineFrequencyCount,     100);
 
-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+#define DEFAULT_STACK_YELLOW_PAGES (2)
+#define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
 
-define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
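The same DEFAULT_/MIN_ macro pattern is applied to ppc, sparc, x86 and zero in the hunks below. Expressing the values as macros rather than bare literals lets shared code name a platform's minimum as well as its default, which is what a startup range check needs. A minimal, self-contained sketch of such a check (not the actual HotSpot flag machinery):

    #include <cstdio>

    // Values as defined above for aarch64 product builds.
    #define DEFAULT_STACK_YELLOW_PAGES (2)
    #define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES

    // Reject a user-supplied -XX:StackYellowPages below the platform minimum.
    static bool check_stack_yellow_pages(long value) {
      if (value < MIN_STACK_YELLOW_PAGES) {
        fprintf(stderr, "StackYellowPages=%ld below minimum %d\n",
                value, MIN_STACK_YELLOW_PAGES);
        return false;
      }
      return true;
    }

    int main() {
      check_stack_yellow_pages(0);  // rejected: below the minimum of 2
      check_stack_yellow_pages(2);  // accepted: the default
      return 0;
    }
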
--- a/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/cpu/ppc/vm/globals_ppc.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -41,6 +41,18 @@
 define_pd_global(bool, TrapBasedNullChecks,   true);
 define_pd_global(bool, UncommonNullCast,      true);  // Uncommon-trap NULLs passed to check cast.
 
+#define DEFAULT_STACK_YELLOW_PAGES (6)
+#define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_SHADOW_PAGES (6 DEBUG_ONLY(+2))
+
+#define MIN_STACK_YELLOW_PAGES (1)
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES (1)
+
+define_pd_global(intx, StackYellowPages,      DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages,         DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages,      DEFAULT_STACK_SHADOW_PAGES);
+
 // Use large code-entry alignment.
 define_pd_global(intx, CodeEntryAlignment,    128);
 define_pd_global(intx, OptoLoopAlignment,     16);
--- a/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/cpu/sparc/vm/globals_sparc.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -52,19 +52,27 @@
 define_pd_global(intx, InlineFrequencyCount,  50);  // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode,       1500);
 
+#define DEFAULT_STACK_YELLOW_PAGES (2)
+#define DEFAULT_STACK_RED_PAGES (1)
+
 #ifdef _LP64
 // Stack slots are 2X larger in LP64 than in the 32 bit VM.
 define_pd_global(intx, ThreadStackSize,       1024);
 define_pd_global(intx, VMThreadStackSize,     1024);
-define_pd_global(intx, StackShadowPages, 10 DEBUG_ONLY(+1));
+#define DEFAULT_STACK_SHADOW_PAGES (10 DEBUG_ONLY(+1))
 #else
 define_pd_global(intx, ThreadStackSize,       512);
 define_pd_global(intx, VMThreadStackSize,     512);
-define_pd_global(intx, StackShadowPages, 3 DEBUG_ONLY(+1));
-#endif
+#define DEFAULT_STACK_SHADOW_PAGES (3 DEBUG_ONLY(+1))
+#endif // _LP64
 
-define_pd_global(intx, StackYellowPages, 2);
-define_pd_global(intx, StackRedPages, 1);
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
 
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
--- a/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/cpu/x86/vm/globals_x86.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -55,16 +55,28 @@
 define_pd_global(intx, InlineFrequencyCount,     100);
 define_pd_global(intx, InlineSmallCode,          1000);
 
-define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
-define_pd_global(intx, StackRedPages, 1);
+#define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
+#define DEFAULT_STACK_RED_PAGES (1)
+
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+
 #ifdef AMD64
 // Very large C++ stack frames using solaris-amd64 optimized builds
 // due to lack of optimization caused by C++ compiler bugs
-define_pd_global(intx, StackShadowPages, NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2));
+#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(6) DEBUG_ONLY(+2))
+// For those clients that do not use write socket, we allow
+// the min range value to be below that of the default
+#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(6) DEBUG_ONLY(+2))
 #else
-define_pd_global(intx, StackShadowPages, 4 DEBUG_ONLY(+5));
+#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
 #endif // AMD64
 
+define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
+
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
 
@@ -135,6 +147,7 @@
                                                                             \
   product(uintx, RTMRetryCount, 5,                                          \
           "Number of RTM retries on lock abort or busy")                    \
+          range(0, max_uintx)                                               \
                                                                             \
   experimental(intx, RTMSpinLoopCount, 100,                                 \
           "Spin count for lock to become free before RTM retry")            \
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -45,9 +45,17 @@
 define_pd_global(intx,  InlineFrequencyCount, 100);
 define_pd_global(intx,  InlineSmallCode,      1000 );
 
-define_pd_global(intx,  StackYellowPages,     2);
-define_pd_global(intx,  StackRedPages,        1);
-define_pd_global(intx,  StackShadowPages,     5 LP64_ONLY(+1) DEBUG_ONLY(+3));
+#define DEFAULT_STACK_YELLOW_PAGES (2)
+#define DEFAULT_STACK_RED_PAGES (1)
+#define DEFAULT_STACK_SHADOW_PAGES (5 LP64_ONLY(+1) DEBUG_ONLY(+3))
+
+#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
+#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
+#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
+
+define_pd_global(intx,  StackYellowPages,     DEFAULT_STACK_YELLOW_PAGES);
+define_pd_global(intx,  StackRedPages,        DEFAULT_STACK_RED_PAGES);
+define_pd_global(intx,  StackShadowPages,     DEFAULT_STACK_SHADOW_PAGES);
 
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
--- a/hotspot/src/os/aix/vm/globals_aix.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/os/aix/vm/globals_aix.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -39,15 +39,16 @@
   /* a scarce resource and there may be situations where we do not want the VM */   \
   /* to run with 16M pages. (Will fall back to 64K pages).                     */   \
   product_pd(bool, Use16MPages,                                                     \
-          "Use 16M pages if available.")                                            \
+             "Use 16M pages if available.")                                         \
                                                                                     \
   /*  use optimized addresses for the polling page, */                              \
   /* e.g. map it to a special 32-bit address.       */                              \
   product_pd(bool, OptimizePollingPageLocation,                                     \
-          "Optimize the location of the polling page used for Safepoints")          \
+             "Optimize the location of the polling page used for Safepoints")       \
                                                                                     \
   product_pd(intx, AttachListenerTimeout,                                           \
-          "Timeout in ms the attach listener waits for a request")                  \
+             "Timeout in ms the attach listener waits for a request")               \
+             range(0, 2147483)                                                      \
                                                                                     \
 
 // Per default, do not allow 16M pages. 16M pages have to be switched on specifically.
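The new range caps AttachListenerTimeout at 2147483 ms. A plausible reading (an assumption; the diff gives no rationale) is that this is the largest whole-millisecond value whose microsecond equivalent still fits in a signed 32-bit integer, which a quick check confirms arithmetically:

    #include <cstdint>

    // 2147483 ms = 2,147,483,000 us, just under INT32_MAX = 2,147,483,647.
    static_assert(2147483LL * 1000 <= INT32_MAX, "cap fits as int32 microseconds");
    static_assert(2147484LL * 1000 >  INT32_MAX, "one more millisecond would not");

    int main() { return 0; }
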
--- a/hotspot/src/os_cpu/aix_ppc/vm/globals_aix_ppc.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/os_cpu/aix_ppc/vm/globals_aix_ppc.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -42,10 +42,6 @@
 // Allow extra space in DEBUG builds for asserts.
 define_pd_global(size_t, JVMInvokeMethodSlack,   8192);
 
-define_pd_global(intx, StackYellowPages,         6);
-define_pd_global(intx, StackRedPages,            1);
-define_pd_global(intx, StackShadowPages,         6 DEBUG_ONLY(+2));
-
 // Only used on 64 bit platforms
 define_pd_global(size_t, HeapBaseMinAddress,     2*G);
 
--- a/hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/os_cpu/linux_ppc/vm/globals_linux_ppc.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -42,10 +42,6 @@
 // Allow extra space in DEBUG builds for asserts.
 define_pd_global(size_t, JVMInvokeMethodSlack,   8192);
 
-define_pd_global(intx, StackYellowPages,         6);
-define_pd_global(intx, StackRedPages,            1);
-define_pd_global(intx, StackShadowPages,         6 DEBUG_ONLY(+2));
-
 // Only used on 64 bit platforms
 define_pd_global(size_t, HeapBaseMinAddress,     2*G);
 
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -554,7 +554,7 @@
                     exception->print_value_string(), p2i((address)exception()), nm->method()->print_value_string(), p2i(pc), p2i(thread));
     }
     // for AbortVMOnException flag
-    NOT_PRODUCT(Exceptions::debug_check_abort(exception));
+    Exceptions::debug_check_abort(exception);
 
     // Clear out the exception oop and pc since looking up an
     // exception handler can cause class loading, which might throw an
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -2003,6 +2003,10 @@
     verify_legal_method_modifiers(flags, is_interface, name, CHECK_(nullHandle));
   }
 
+  if (name == vmSymbols::object_initializer_name() && is_interface) {
+    classfile_parse_error("Interface cannot have a method named <init>, class file %s", CHECK_(nullHandle));
+  }
+
   int args_size = -1;  // only used when _need_verify is true
   if (_need_verify) {
     args_size = ((flags & JVM_ACC_STATIC) ? 0 : 1) +
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -1579,11 +1579,9 @@
             return;
           }
           // Make sure "this" has been initialized if current method is an
-          // <init>.  Note that "<init>" methods in interfaces are just
-          // normal methods.  Interfaces cannot have ctors.
+          // <init>.
           if (_method->name() == vmSymbols::object_initializer_name() &&
-              current_frame.flag_this_uninit() &&
-              !current_class()->is_interface()) {
+              current_frame.flag_this_uninit()) {
             verify_error(ErrorContext::bad_code(bci),
                          "Constructor must call super() or this() "
                          "before return");
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -6051,8 +6051,8 @@
     _span(span),
     _bitMap(bitMap)
 {
-    assert(_ref_processor == NULL, "deliberately left NULL");
-    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
+  assert(ref_processor() == NULL, "deliberately left NULL");
+  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }
 
 void MarkRefsIntoClosure::do_oop(oop obj) {
@@ -6073,8 +6073,8 @@
     _span(span),
     _bitMap(bitMap)
 {
-    assert(_ref_processor == NULL, "deliberately left NULL");
-    assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
+  assert(ref_processor() == NULL, "deliberately left NULL");
+  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
 }
 
 void Par_MarkRefsIntoClosure::do_oop(oop obj) {
@@ -6097,8 +6097,8 @@
     _verification_bm(verification_bm),
     _cms_bm(cms_bm)
 {
-    assert(_ref_processor == NULL, "deliberately left NULL");
-    assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
+  assert(ref_processor() == NULL, "deliberately left NULL");
+  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
 }
 
 void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
@@ -6140,8 +6140,9 @@
   _concurrent_precleaning(concurrent_precleaning),
   _freelistLock(NULL)
 {
-  _ref_processor = rp;
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  // FIXME: Should initialize in base class constructor.
+  assert(rp != NULL, "ref_processor shouldn't be NULL");
+  set_ref_processor_internal(rp);
 }
 
 // This closure is used to mark refs into the CMS generation at the
@@ -6246,8 +6247,9 @@
                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
 {
-  _ref_processor = rp;
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  // FIXME: Should initialize in base class constructor.
+  assert(rp != NULL, "ref_processor shouldn't be NULL");
+  set_ref_processor_internal(rp);
 }
 
 // This closure is used to mark refs into the CMS generation at the
@@ -7097,7 +7099,7 @@
   _mark_stack(mark_stack),
   _concurrent_precleaning(concurrent_precleaning)
 {
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
 }
 
 // Grey object rescan during pre-cleaning and second checkpoint phases --
@@ -7168,7 +7170,7 @@
   _bit_map(bit_map),
   _work_queue(work_queue)
 {
-  assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+  assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
 }
 
 void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
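Throughout this file the closure constructors stop writing the inherited _ref_processor field directly and instead go through the ref_processor() accessor and the new set_ref_processor_internal() setter. A minimal sketch of that encapsulation (hypothetical class names, not the actual closure hierarchy):

    struct ReferenceProcessor {};

    class OopClosureBase {
      ReferenceProcessor* _ref_processor;
     protected:
      // Subclasses may install a processor, but only through this setter.
      void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }
     public:
      OopClosureBase() : _ref_processor(nullptr) {}
      ReferenceProcessor* ref_processor() const { return _ref_processor; }
    };

    class MarkingClosure : public OopClosureBase {
     public:
      explicit MarkingClosure(ReferenceProcessor* rp) {
        // Mirrors the asserts above: callers must supply a processor.
        set_ref_processor_internal(rp);
      }
    };

    int main() {
      ReferenceProcessor rp;
      MarkingClosure cl(&rp);
      return cl.ref_processor() == &rp ? 0 : 1;
    }
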
--- a/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/concurrentG1Refine.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -112,6 +112,8 @@
   int thread_threshold_step() const { return _thread_threshold_step; }
 
   G1HotCardCache* hot_card_cache() { return &_hot_card_cache; }
+
+  static bool hot_card_cache_enabled() { return G1HotCardCache::default_use_cache(); }
 };
 
 #endif // SHARE_VM_GC_G1_CONCURRENTG1REFINE_HPP
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -3084,17 +3084,21 @@
   }
 };
 
+static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
+  ReferenceProcessor* result = NULL;
+  if (G1UseConcMarkReferenceProcessing) {
+    result = g1h->ref_processor_cm();
+    assert(result != NULL, "should not be NULL");
+  }
+  return result;
+}
+
 G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
                                ConcurrentMark* cm,
                                CMTask* task)
-  : _g1h(g1h), _cm(cm), _task(task) {
-  assert(_ref_processor == NULL, "should be initialized to NULL");
-
-  if (G1UseConcMarkReferenceProcessing) {
-    _ref_processor = g1h->ref_processor_cm();
-    assert(_ref_processor != NULL, "should not be NULL");
-  }
-}
+  : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
+    _g1h(g1h), _cm(cm), _task(task)
+{ }
 
 void CMTask::setup_for_region(HeapRegion* hr) {
   assert(hr != NULL,
@@ -3731,8 +3735,7 @@
   // and do_marking_step() is not being called serially.
   bool do_stealing = do_termination && !is_serial;
 
-  double diff_prediction_ms =
-    g1_policy->get_new_prediction(&_marking_step_diffs_ms);
+  double diff_prediction_ms = _g1h->g1_policy()->predictor().get_new_prediction(&_marking_step_diffs_ms);
   _time_target_ms = time_target_ms - diff_prediction_ms;
 
   // set up the variables that are used in the work-based scheme to
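The G1CMOopClosure change above replaces post-construction assignment of _ref_processor with a static helper whose result feeds the base-class initializer list, so the field is set exactly once. A sketch of the idiom with stand-in types:

    struct RefProc {};

    struct Heap {
      bool use_conc_mark_ref_processing;
      RefProc* rp;
    };

    // The conditional logic lives in a helper so the constructor itself can
    // stay a plain initializer list.
    static RefProc* pick_ref_processor(Heap* h) {
      return h->use_conc_mark_ref_processing ? h->rp : nullptr;
    }

    class Base {
      RefProc* _rp;
     public:
      explicit Base(RefProc* rp) : _rp(rp) {}
      RefProc* rp() const { return _rp; }
    };

    class CMOopClosure : public Base {
     public:
      explicit CMOopClosure(Heap* h) : Base(pick_ref_processor(h)) {}
    };

    int main() {
      RefProc rp;
      Heap h = { true, &rp };
      CMOopClosure cl(&h);
      return cl.rp() == &rp ? 0 : 1;
    }
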
--- a/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CodeBlobClosure.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -22,6 +22,9 @@
  *
  */
 
+#ifndef SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+#define SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "memory/iterator.hpp"
 
@@ -53,3 +56,6 @@
 
   void do_code_blob(CodeBlob* cb);
 };
+
+#endif // SHARE_VM_GC_G1_G1CODEBLOBCLOSURE_HPP
+
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -44,6 +44,7 @@
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1YCTypes.hpp"
@@ -125,213 +126,6 @@
   size_t num_processed() const { return _num_processed; }
 };
 
-YoungList::YoungList(G1CollectedHeap* g1h) :
-    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
-    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
-  guarantee(check_list_empty(false), "just making sure...");
-}
-
-void YoungList::push_region(HeapRegion *hr) {
-  assert(!hr->is_young(), "should not already be young");
-  assert(hr->get_next_young_region() == NULL, "cause it should!");
-
-  hr->set_next_young_region(_head);
-  _head = hr;
-
-  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
-  ++_length;
-}
-
-void YoungList::add_survivor_region(HeapRegion* hr) {
-  assert(hr->is_survivor(), "should be flagged as survivor region");
-  assert(hr->get_next_young_region() == NULL, "cause it should!");
-
-  hr->set_next_young_region(_survivor_head);
-  if (_survivor_head == NULL) {
-    _survivor_tail = hr;
-  }
-  _survivor_head = hr;
-  ++_survivor_length;
-}
-
-void YoungList::empty_list(HeapRegion* list) {
-  while (list != NULL) {
-    HeapRegion* next = list->get_next_young_region();
-    list->set_next_young_region(NULL);
-    list->uninstall_surv_rate_group();
-    // This is called before a Full GC and all the non-empty /
-    // non-humongous regions at the end of the Full GC will end up as
-    // old anyway.
-    list->set_old();
-    list = next;
-  }
-}
-
-void YoungList::empty_list() {
-  assert(check_list_well_formed(), "young list should be well formed");
-
-  empty_list(_head);
-  _head = NULL;
-  _length = 0;
-
-  empty_list(_survivor_head);
-  _survivor_head = NULL;
-  _survivor_tail = NULL;
-  _survivor_length = 0;
-
-  _last_sampled_rs_lengths = 0;
-
-  assert(check_list_empty(false), "just making sure...");
-}
-
-bool YoungList::check_list_well_formed() {
-  bool ret = true;
-
-  uint length = 0;
-  HeapRegion* curr = _head;
-  HeapRegion* last = NULL;
-  while (curr != NULL) {
-    if (!curr->is_young()) {
-      gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
-                             "incorrectly tagged (y: %d, surv: %d)",
-                             p2i(curr->bottom()), p2i(curr->end()),
-                             curr->is_young(), curr->is_survivor());
-      ret = false;
-    }
-    ++length;
-    last = curr;
-    curr = curr->get_next_young_region();
-  }
-  ret = ret && (length == _length);
-
-  if (!ret) {
-    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
-    gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
-                           length, _length);
-  }
-
-  return ret;
-}
-
-bool YoungList::check_list_empty(bool check_sample) {
-  bool ret = true;
-
-  if (_length != 0) {
-    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
-                  _length);
-    ret = false;
-  }
-  if (check_sample && _last_sampled_rs_lengths != 0) {
-    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
-    ret = false;
-  }
-  if (_head != NULL) {
-    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
-    ret = false;
-  }
-  if (!ret) {
-    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
-  }
-
-  return ret;
-}
-
-void
-YoungList::rs_length_sampling_init() {
-  _sampled_rs_lengths = 0;
-  _curr               = _head;
-}
-
-bool
-YoungList::rs_length_sampling_more() {
-  return _curr != NULL;
-}
-
-void
-YoungList::rs_length_sampling_next() {
-  assert( _curr != NULL, "invariant" );
-  size_t rs_length = _curr->rem_set()->occupied();
-
-  _sampled_rs_lengths += rs_length;
-
-  // The current region may not yet have been added to the
-  // incremental collection set (it gets added when it is
-  // retired as the current allocation region).
-  if (_curr->in_collection_set()) {
-    // Update the collection set policy information for this region
-    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
-  }
-
-  _curr = _curr->get_next_young_region();
-  if (_curr == NULL) {
-    _last_sampled_rs_lengths = _sampled_rs_lengths;
-    // gclog_or_tty->print_cr("last sampled RS lengths = %d", _last_sampled_rs_lengths);
-  }
-}
-
-void
-YoungList::reset_auxilary_lists() {
-  guarantee( is_empty(), "young list should be empty" );
-  assert(check_list_well_formed(), "young list should be well formed");
-
-  // Add survivor regions to SurvRateGroup.
-  _g1h->g1_policy()->note_start_adding_survivor_regions();
-  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
-
-  int young_index_in_cset = 0;
-  for (HeapRegion* curr = _survivor_head;
-       curr != NULL;
-       curr = curr->get_next_young_region()) {
-    _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
-
-    // The region is a non-empty survivor so let's add it to
-    // the incremental collection set for the next evacuation
-    // pause.
-    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
-    young_index_in_cset += 1;
-  }
-  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
-  _g1h->g1_policy()->note_stop_adding_survivor_regions();
-
-  _head   = _survivor_head;
-  _length = _survivor_length;
-  if (_survivor_head != NULL) {
-    assert(_survivor_tail != NULL, "cause it shouldn't be");
-    assert(_survivor_length > 0, "invariant");
-    _survivor_tail->set_next_young_region(NULL);
-  }
-
-  // Don't clear the survivor list handles until the start of
-  // the next evacuation pause - we need it in order to re-tag
-  // the survivor regions from this evacuation pause as 'young'
-  // at the start of the next.
-
-  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
-
-  assert(check_list_well_formed(), "young list should be well formed");
-}
-
-void YoungList::print() {
-  HeapRegion* lists[] = {_head,   _survivor_head};
-  const char* names[] = {"YOUNG", "SURVIVOR"};
-
-  for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
-    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
-    HeapRegion *curr = lists[list];
-    if (curr == NULL)
-      gclog_or_tty->print_cr("  empty");
-    while (curr != NULL) {
-      gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
-                             HR_FORMAT_PARAMS(curr),
-                             p2i(curr->prev_top_at_mark_start()),
-                             p2i(curr->next_top_at_mark_start()),
-                             curr->age_in_surv_rate_group_cond());
-      curr = curr->get_next_young_region();
-    }
-  }
-
-  gclog_or_tty->cr();
-}
 
 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
   HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
@@ -2469,14 +2263,11 @@
 }
 #endif // PRODUCT
 
-void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
-                                                 DirtyCardQueue* into_cset_dcq,
-                                                 bool concurrent,
-                                                 uint worker_i) {
-  // Clean cards in the hot card cache
-  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
-  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
-
+void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
+  _cg1r->hot_card_cache()->drain(cl, worker_i);
+}
+
+void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   size_t n_completed_buffers = 0;
   while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
@@ -4355,80 +4146,6 @@
   }
 }
 
-void G1ParCopyHelper::mark_object(oop obj) {
-  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
-
-  // We know that the object is not moving so it's safe to read its size.
-  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
-}
-
-void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
-  assert(from_obj->is_forwarded(), "from obj should be forwarded");
-  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
-  assert(from_obj != to_obj, "should not be self-forwarded");
-
-  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
-  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
-
-  // The object might be in the process of being copied by another
-  // worker so we cannot trust that its to-space image is
-  // well-formed. So we have to read its size from its from-space
-  // image which we know should not be changing.
-  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
-}
-
-template <class T>
-void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
-  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
-    _scanned_klass->record_modified_oops();
-  }
-}
-
-template <G1Barrier barrier, G1Mark do_mark_object>
-template <class T>
-void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-
-  if (oopDesc::is_null(heap_oop)) {
-    return;
-  }
-
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-
-  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
-
-  const InCSetState state = _g1->in_cset_state(obj);
-  if (state.is_in_cset()) {
-    oop forwardee;
-    markOop m = obj->mark();
-    if (m->is_marked()) {
-      forwardee = (oop) m->decode_pointer();
-    } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
-    }
-    assert(forwardee != NULL, "forwardee should not be NULL");
-    oopDesc::encode_store_heap_oop(p, forwardee);
-    if (do_mark_object != G1MarkNone && forwardee != obj) {
-      // If the object is self-forwarded we don't need to explicitly
-      // mark it, the evacuation failure protocol will do so.
-      mark_forwarded_object(obj, forwardee);
-    }
-
-    if (barrier == G1BarrierKlass) {
-      do_klass_barrier(p, forwardee);
-    }
-  } else {
-    if (state.is_humongous()) {
-      _g1->set_humongous_is_live(obj);
-    }
-    // The object is not in collection set. If we're a root scanning
-    // closure during an initial mark pause then attempt to mark the object.
-    if (do_mark_object == G1MarkFromRoot) {
-      mark_object(obj);
-    }
-  }
-}
-
 class G1ParEvacuateFollowersClosure : public VoidClosure {
 private:
   double _start_term;
@@ -4481,32 +4198,6 @@
   } while (!offer_termination());
 }
 
-class G1KlassScanClosure : public KlassClosure {
- G1ParCopyHelper* _closure;
- bool             _process_only_dirty;
- int              _count;
- public:
-  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
-      : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
-  void do_klass(Klass* klass) {
-    // If the klass has not been dirtied we know that there's
-    // no references into  the young gen and we can skip it.
-   if (!_process_only_dirty || klass->has_modified_oops()) {
-      // Clean the klass since we're going to scavenge all the metadata.
-      klass->clear_modified_oops();
-
-      // Tell the closure that this klass is the Klass to scavenge
-      // and is the one to dirty if oops are left pointing into the young gen.
-      _closure->set_scanned_klass(klass);
-
-      klass->oops_do(_closure);
-
-      _closure->set_scanned_klass(NULL);
-    }
-    _count++;
-  }
-};
-
 class G1ParTask : public AbstractGangTask {
 protected:
   G1CollectedHeap*         _g1h;
@@ -4527,42 +4218,6 @@
       _n_workers(n_workers)
   {}
 
-  RefToScanQueueSet* queues() { return _queues; }
-
-  RefToScanQueue *work_queue(int i) {
-    return queues()->queue(i);
-  }
-
-  ParallelTaskTerminator* terminator() { return &_terminator; }
-
-  // Helps out with CLD processing.
-  //
-  // During InitialMark we need to:
-  // 1) Scavenge all CLDs for the young GC.
-  // 2) Mark all objects directly reachable from strong CLDs.
-  template <G1Mark do_mark_object>
-  class G1CLDClosure : public CLDClosure {
-    G1ParCopyClosure<G1BarrierNone,  do_mark_object>* _oop_closure;
-    G1ParCopyClosure<G1BarrierKlass, do_mark_object>  _oop_in_klass_closure;
-    G1KlassScanClosure                                _klass_in_cld_closure;
-    bool                                              _claim;
-
-   public:
-    G1CLDClosure(G1ParCopyClosure<G1BarrierNone, do_mark_object>* oop_closure,
-                 bool only_young, bool claim)
-        : _oop_closure(oop_closure),
-          _oop_in_klass_closure(oop_closure->g1(),
-                                oop_closure->pss()),
-          _klass_in_cld_closure(&_oop_in_klass_closure, only_young),
-          _claim(claim) {
-
-    }
-
-    void do_cld(ClassLoaderData* cld) {
-      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
-    }
-  };
-
   void work(uint worker_id) {
     if (worker_id >= _n_workers) return;  // no work needed this round
 
@@ -4578,62 +4233,18 @@
       G1ParScanThreadState*           pss = _pss->state_for_worker(worker_id);
       pss->set_ref_processor(rp);
 
-      bool only_young = _g1h->collector_state()->gcs_are_young();
-
-      // Non-IM young GC.
-      G1ParCopyClosure<G1BarrierNone, G1MarkNone>             scan_only_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkNone>                                scan_only_cld_cl(&scan_only_root_cl,
-                                                                               only_young, // Only process dirty klasses.
-                                                                               false);     // No need to claim CLDs.
-      // IM young GC.
-      //    Strong roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot>         scan_mark_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkFromRoot>                            scan_mark_cld_cl(&scan_mark_root_cl,
-                                                                               false, // Process all klasses.
-                                                                               true); // Need to claim CLDs.
-      //    Weak roots closures.
-      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss);
-      G1CLDClosure<G1MarkPromotedFromRoot>                    scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
-                                                                                    false, // Process all klasses.
-                                                                                    true); // Need to claim CLDs.
-
-      OopClosure* strong_root_cl;
-      OopClosure* weak_root_cl;
-      CLDClosure* strong_cld_cl;
-      CLDClosure* weak_cld_cl;
-
-      bool trace_metadata = false;
-
-      if (_g1h->collector_state()->during_initial_mark_pause()) {
-        // We also need to mark copied objects.
-        strong_root_cl = &scan_mark_root_cl;
-        strong_cld_cl  = &scan_mark_cld_cl;
-        if (ClassUnloadingWithConcurrentMark) {
-          weak_root_cl = &scan_mark_weak_root_cl;
-          weak_cld_cl  = &scan_mark_weak_cld_cl;
-          trace_metadata = true;
-        } else {
-          weak_root_cl = &scan_mark_root_cl;
-          weak_cld_cl  = &scan_mark_cld_cl;
-        }
-      } else {
-        strong_root_cl = &scan_only_root_cl;
-        weak_root_cl   = &scan_only_root_cl;
-        strong_cld_cl  = &scan_only_cld_cl;
-        weak_cld_cl    = &scan_only_cld_cl;
-      }
-
       double start_strong_roots_sec = os::elapsedTime();
-      _root_processor->evacuate_roots(strong_root_cl,
-                                      weak_root_cl,
-                                      strong_cld_cl,
-                                      weak_cld_cl,
-                                      trace_metadata,
-                                      worker_id);
+
+      _root_processor->evacuate_roots(pss->closures(), worker_id);
 
       G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
+
+      // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
+      // treating the nmethods visited to act as roots for concurrent marking.
+      // We only want to make sure that the oops in the nmethods are adjusted with regard to the
+      // objects copied by the current evacuation.
       size_t cards_scanned = _g1h->g1_rem_set()->oops_into_collection_set_do(&push_heap_rs_cl,
-                                                                             weak_root_cl,
+                                                                             pss->closures()->weak_codeblobs(),
                                                                              worker_id);
 
       _pss->add_cards_scanned(worker_id, cards_scanned);
@@ -5294,19 +4905,8 @@
     G1ParScanThreadState*          pss = _pss->state_for_worker(worker_id);
     pss->set_ref_processor(NULL);
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
-
-    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-    if (_g1h->collector_state()->during_initial_mark_pause()) {
-      // We also need to mark copied objects.
-      copy_non_heap_cl = &copy_mark_non_heap_cl;
-    }
-
     // Keep alive closure.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
 
     // Complete GC closure
     G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
@@ -5394,23 +4994,12 @@
     pss->set_ref_processor(NULL);
     assert(pss->queue_is_empty(), "both queue and overflow should be empty");
 
-    G1ParScanExtRootClosure        only_copy_non_heap_cl(_g1h, pss);
-
-    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss);
-
-    OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-    if (_g1h->collector_state()->during_initial_mark_pause()) {
-      // We also need to mark copied objects.
-      copy_non_heap_cl = &copy_mark_non_heap_cl;
-    }
-
     // Is alive closure
     G1AlwaysAliveClosure always_alive(_g1h);
 
     // Copying keep alive closure. Applied to referent objects that need
     // to be copied.
-    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
+    G1CopyingKeepAliveClosure keep_alive(_g1h, pss->closures()->raw_strong_oops(), pss);
 
     ReferenceProcessor* rp = _g1h->ref_processor_cm();
 
@@ -5500,23 +5089,8 @@
   pss->set_ref_processor(NULL);
   assert(pss->queue_is_empty(), "pre-condition");
 
-  // We do not embed a reference processor in the copying/scanning
-  // closures while we're actually processing the discovered
-  // reference objects.
-
-  G1ParScanExtRootClosure        only_copy_non_heap_cl(this, pss);
-
-  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss);
-
-  OopClosure*                    copy_non_heap_cl = &only_copy_non_heap_cl;
-
-  if (collector_state()->during_initial_mark_pause()) {
-    // We also need to mark copied objects.
-    copy_non_heap_cl = &copy_mark_non_heap_cl;
-  }
-
   // Keep alive closure.
-  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
+  G1CopyingKeepAliveClosure keep_alive(this, pss->closures()->raw_strong_oops(), pss);
 
   // Serial Complete GC closure
   G1STWDrainQueueClosure drain_queue(this, pss);
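Most of the code deleted above hand-picked strong/weak oop and CLD closures at every call site depending on whether the pause is an initial mark. With this change the per-worker G1ParScanThreadState hands out one pre-selected bundle via pss->closures(). A sketch of the shape of that pattern, with hypothetical stand-in types rather than the real G1RootClosures API:

    struct OopClosure {};
    struct CLDClosure {};
    struct CodeBlobClosure {};

    // One bundle, chosen once per pause, instead of four ad-hoc pointers
    // re-derived at every use.
    struct RootClosureBundle {
      OopClosure*      strong_oops;
      OopClosure*      weak_oops;
      CLDClosure*      weak_clds;
      CodeBlobClosure* weak_codeblobs;
    };

    struct ScanThreadState {
      RootClosureBundle* _closures;
      RootClosureBundle* closures() { return _closures; }
    };

    int main() {
      OopClosure so, wo; CLDClosure wc; CodeBlobClosure cb;
      RootClosureBundle bundle = { &so, &wo, &wc, &cb };
      ScanThreadState pss = { &bundle };
      // Call sites now just ask the bundle for what they need:
      CodeBlobClosure* for_remset_scan = pss.closures()->weak_codeblobs;
      (void)for_remset_scan;
      return 0;
    }
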
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -39,6 +39,7 @@
 #include "gc/g1/hSpaceCounters.hpp"
 #include "gc/g1/heapRegionManager.hpp"
 #include "gc/g1/heapRegionSet.hpp"
+#include "gc/g1/youngList.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/collectedHeap.hpp"
 #include "gc/shared/plab.hpp"
@@ -64,7 +65,6 @@
 class CompactibleSpaceClosure;
 class Space;
 class G1CollectorPolicy;
-class GenRemSet;
 class G1RemSet;
 class HeapRegionRemSetIterator;
 class ConcurrentMark;
@@ -88,79 +88,6 @@
 typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )
 
-class YoungList : public CHeapObj<mtGC> {
-private:
-  G1CollectedHeap* _g1h;
-
-  HeapRegion* _head;
-
-  HeapRegion* _survivor_head;
-  HeapRegion* _survivor_tail;
-
-  HeapRegion* _curr;
-
-  uint        _length;
-  uint        _survivor_length;
-
-  size_t      _last_sampled_rs_lengths;
-  size_t      _sampled_rs_lengths;
-
-  void         empty_list(HeapRegion* list);
-
-public:
-  YoungList(G1CollectedHeap* g1h);
-
-  void         push_region(HeapRegion* hr);
-  void         add_survivor_region(HeapRegion* hr);
-
-  void         empty_list();
-  bool         is_empty() { return _length == 0; }
-  uint         length() { return _length; }
-  uint         eden_length() { return length() - survivor_length(); }
-  uint         survivor_length() { return _survivor_length; }
-
-  // Currently we do not keep track of the used byte sum for the
-  // young list and the survivors and it'd be quite a lot of work to
-  // do so. When we'll eventually replace the young list with
-  // instances of HeapRegionLinkedList we'll get that for free. So,
-  // we'll report the more accurate information then.
-  size_t       eden_used_bytes() {
-    assert(length() >= survivor_length(), "invariant");
-    return (size_t) eden_length() * HeapRegion::GrainBytes;
-  }
-  size_t       survivor_used_bytes() {
-    return (size_t) survivor_length() * HeapRegion::GrainBytes;
-  }
-
-  void rs_length_sampling_init();
-  bool rs_length_sampling_more();
-  void rs_length_sampling_next();
-
-  void reset_sampled_info() {
-    _last_sampled_rs_lengths =   0;
-  }
-  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
-
-  // for development purposes
-  void reset_auxilary_lists();
-  void clear() { _head = NULL; _length = 0; }
-
-  void clear_survivors() {
-    _survivor_head    = NULL;
-    _survivor_tail    = NULL;
-    _survivor_length  = 0;
-  }
-
-  HeapRegion* first_region() { return _head; }
-  HeapRegion* first_survivor_region() { return _survivor_head; }
-  HeapRegion* last_survivor_region() { return _survivor_tail; }
-
-  // debugging
-  bool          check_list_well_formed();
-  bool          check_list_empty(bool check_sample = true);
-  void          print();
-};
-
 // The G1 STW is alive closure.
 // An instance is embedded into the G1CH and used as the
 // (optional) _is_alive_non_header closure in the STW
@@ -1083,9 +1010,11 @@
   // continues humongous regions too.
   void reset_gc_time_stamps(HeapRegion* hr);
 
-  void iterate_dirty_card_closure(CardTableEntryClosure* cl,
-                                  DirtyCardQueue* into_cset_dcq,
-                                  bool concurrent, uint worker_i);
+  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
+  void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);
+
+  // Apply the given closure on all cards in the Dirty Card Queue Set, emptying it.
+  void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
 
   // The shared block offset table array.
   G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -80,6 +80,7 @@
 };
 
 G1CollectorPolicy::G1CollectorPolicy() :
+  _predictor(G1ConfidencePercent / 100.0),
   _parallel_gc_threads(ParallelGCThreads),
 
   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
@@ -92,6 +93,7 @@
   _prev_collection_pause_end_ms(0.0),
   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
+  _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
@@ -126,8 +128,6 @@
   _survivor_cset_region_length(0),
   _old_cset_region_length(0),
 
-  _sigma(G1ConfidencePercent / 100.0),
-
   _collection_set(NULL),
   _collection_set_bytes_used_before(0),
 
@@ -150,12 +150,12 @@
 
   _gc_overhead_perc(0.0) {
 
-  // SurvRateGroups below must be initialized after '_sigma' because they
-  // indirectly access '_sigma' through this object passed to their constructor.
+  // SurvRateGroups below must be initialized after the predictor because they
+  // indirectly use it through this object passed to their constructor.
   _short_lived_surv_rate_group =
-    new SurvRateGroup(this, "Short Lived", G1YoungSurvRateNumRegionsSummary);
+    new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
   _survivor_surv_rate_group =
-    new SurvRateGroup(this, "Survivor", G1YoungSurvRateNumRegionsSummary);
+    new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 
   // Set up the region size and associated fields. Given that the
   // policy is created before the heap, we have to set this up here,
@@ -192,6 +192,7 @@
 
   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
+  _cost_scan_hcc_seq->add(0.0);
   _young_cards_per_entry_ratio_seq->add(
                                   young_cards_per_entry_ratio_defaults[index]);
   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
@@ -287,9 +288,13 @@
   _collectionSetChooser = new CollectionSetChooser();
 }
 
+double G1CollectorPolicy::get_new_prediction(TruncatedSeq const* seq) const {
+  return _predictor.get_new_prediction(seq);
+}
+
 void G1CollectorPolicy::initialize_alignments() {
   _space_alignment = HeapRegion::GrainBytes;
-  size_t card_table_alignment = GenRemSet::max_alignment_constraint();
+  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   _heap_alignment = MAX3(card_table_alignment, _space_alignment, page_size);
 }
@@ -314,8 +319,7 @@
   }
 }
 
-const G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
-G1CollectorState* G1CollectorPolicy::collector_state() { return _g1->collector_state(); }
+G1CollectorState* G1CollectorPolicy::collector_state() const { return _g1->collector_state(); }
 
 G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults), _adaptive_size(true),
         _min_desired_young_length(0), _max_desired_young_length(0) {
@@ -426,8 +430,8 @@
     _young_list_fixed_length = _young_gen_sizer->min_desired_young_length();
   }
   _free_regions_at_end_of_collection = _g1->num_free_regions();
+
   update_young_list_target_length();
-
   // We may immediately start allocating regions and placing them on the
   // collection set list. Initialize the per-collection set info
   start_incremental_cset_building();
@@ -458,9 +462,8 @@
     return false;
   }
 
-  size_t free_bytes =
-                   (base_free_regions - young_length) * HeapRegion::GrainBytes;
-  if ((2.0 * sigma()) * (double) bytes_to_copy > (double) free_bytes) {
+  size_t free_bytes = (base_free_regions - young_length) * HeapRegion::GrainBytes;
+  if ((2.0 /* magic */ * _predictor.sigma()) * bytes_to_copy > free_bytes) {
     // end condition 3: out-of-space (conservatively!)
     return false;
   }
@@ -1046,10 +1049,12 @@
 
   if (update_stats) {
     double cost_per_card_ms = 0.0;
+    double cost_scan_hcc = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
     if (_pending_cards > 0) {
-      cost_per_card_ms = phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) / (double) _pending_cards;
+      cost_per_card_ms = (phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - cost_scan_hcc) / (double) _pending_cards;
       _cost_per_card_ms_seq->add(cost_per_card_ms);
     }
+    _cost_scan_hcc_seq->add(cost_scan_hcc);
 
     double cost_per_entry_ms = 0.0;
     if (cards_scanned > 10) {
@@ -1146,8 +1151,25 @@
 
   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
-  adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS),
-                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS), update_rs_time_goal_ms);
+
+  double scan_hcc_time_ms = phase_times()->average_time_ms(G1GCPhaseTimes::ScanHCC);
+
+  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
+    ergo_verbose2(ErgoTiming,
+                  "adjust concurrent refinement thresholds",
+                  ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
+                  ergo_format_ms("Update RS time goal")
+                  ergo_format_ms("Scan HCC time"),
+                  update_rs_time_goal_ms,
+                  scan_hcc_time_ms);
+
+    update_rs_time_goal_ms = 0;
+  } else {
+    update_rs_time_goal_ms -= scan_hcc_time_ms;
+  }
+  adjust_concurrent_refinement(phase_times()->average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
+                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
+                               update_rs_time_goal_ms);
 
   _collectionSetChooser->verify();
 }
@@ -1248,7 +1270,7 @@
     cg1r->set_red_zone(g * k_gr);
     cg1r->reinitialize_threads();
 
-    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * sigma()), 1);
+    int processing_threshold_delta = MAX2((int)(cg1r->green_zone() * _predictor.sigma()), 1);
     int processing_threshold = MIN2(cg1r->green_zone() + processing_threshold_delta,
                                     cg1r->yellow_zone());
     // Change the barrier params
@@ -1265,17 +1287,125 @@
   dcqs.notify_if_necessary();
 }
 
-double
-G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
-                                                size_t scanned_cards) const {
+size_t G1CollectorPolicy::predict_rs_length_diff() const {
+  return (size_t) get_new_prediction(_rs_length_diff_seq);
+}
+
+double G1CollectorPolicy::predict_alloc_rate_ms() const {
+  return get_new_prediction(_alloc_rate_ms_seq);
+}
+
+double G1CollectorPolicy::predict_cost_per_card_ms() const {
+  return get_new_prediction(_cost_per_card_ms_seq);
+}
+
+double G1CollectorPolicy::predict_scan_hcc_ms() const {
+  return get_new_prediction(_cost_scan_hcc_seq);
+}
+
+double G1CollectorPolicy::predict_rs_update_time_ms(size_t pending_cards) const {
+  return pending_cards * predict_cost_per_card_ms() + predict_scan_hcc_ms();
+}
+
+double G1CollectorPolicy::predict_young_cards_per_entry_ratio() const {
+  return get_new_prediction(_young_cards_per_entry_ratio_seq);
+}
+
+double G1CollectorPolicy::predict_mixed_cards_per_entry_ratio() const {
+  if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
+    return predict_young_cards_per_entry_ratio();
+  } else {
+    return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
+  }
+}
+
+size_t G1CollectorPolicy::predict_young_card_num(size_t rs_length) const {
+  return (size_t) (rs_length * predict_young_cards_per_entry_ratio());
+}
+
+size_t G1CollectorPolicy::predict_non_young_card_num(size_t rs_length) const {
+  return (size_t)(rs_length * predict_mixed_cards_per_entry_ratio());
+}
+
+double G1CollectorPolicy::predict_rs_scan_time_ms(size_t card_num) const {
+  if (collector_state()->gcs_are_young()) {
+    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
+  } else {
+    return predict_mixed_rs_scan_time_ms(card_num);
+  }
+}
+
+double G1CollectorPolicy::predict_mixed_rs_scan_time_ms(size_t card_num) const {
+  if (_mixed_cost_per_entry_ms_seq->num() < 3) {
+    return card_num * get_new_prediction(_cost_per_entry_ms_seq);
+  } else {
+    return card_num * get_new_prediction(_mixed_cost_per_entry_ms_seq);
+  }
+}
+
+double G1CollectorPolicy::predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
+  if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
+    return (1.1 * bytes_to_copy) * get_new_prediction(_cost_per_byte_ms_seq);
+  } else {
+    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_during_cm_seq);
+  }
+}
+
+double G1CollectorPolicy::predict_object_copy_time_ms(size_t bytes_to_copy) const {
+  if (collector_state()->during_concurrent_mark()) {
+    return predict_object_copy_time_ms_during_cm(bytes_to_copy);
+  } else {
+    return bytes_to_copy * get_new_prediction(_cost_per_byte_ms_seq);
+  }
+}
+
+double G1CollectorPolicy::predict_constant_other_time_ms() const {
+  return get_new_prediction(_constant_other_time_ms_seq);
+}
+
+double G1CollectorPolicy::predict_young_other_time_ms(size_t young_num) const {
+  return young_num * get_new_prediction(_young_other_cost_per_region_ms_seq);
+}
+
+double G1CollectorPolicy::predict_non_young_other_time_ms(size_t non_young_num) const {
+  return non_young_num * get_new_prediction(_non_young_other_cost_per_region_ms_seq);
+}
+
+double G1CollectorPolicy::predict_remark_time_ms() const {
+  return get_new_prediction(_concurrent_mark_remark_times_ms);
+}
+
+double G1CollectorPolicy::predict_cleanup_time_ms() const {
+  return get_new_prediction(_concurrent_mark_cleanup_times_ms);
+}
+
+double G1CollectorPolicy::predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
+  TruncatedSeq* seq = surv_rate_group->get_seq(age);
+  guarantee(seq->num() > 0, "There should be some young gen survivor samples available. Tried to access with age %d", age);
+  double pred = get_new_prediction(seq);
+  if (pred > 1.0) {
+    pred = 1.0;
+  }
+  return pred;
+}
+
+double G1CollectorPolicy::predict_yg_surv_rate(int age) const {
+  return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
+}
+
+double G1CollectorPolicy::accum_yg_surv_rate_pred(int age) const {
+  return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
+}
+
+double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards,
+                                                       size_t scanned_cards) const {
   return
     predict_rs_update_time_ms(pending_cards) +
     predict_rs_scan_time_ms(scanned_cards) +
     predict_constant_other_time_ms();
 }
 
-double
-G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
+double G1CollectorPolicy::predict_base_elapsed_time_ms(size_t pending_cards) const {
   size_t rs_length = predict_rs_length_diff();
   size_t card_num;
   if (collector_state()->gcs_are_young()) {
@@ -1294,14 +1424,13 @@
     assert(hr->is_young() && hr->age_in_surv_rate_group() != -1, "invariant");
     int age = hr->age_in_surv_rate_group();
     double yg_surv_rate = predict_yg_surv_rate(age, hr->surv_rate_group());
-    bytes_to_copy = (size_t) ((double) hr->used() * yg_surv_rate);
+    bytes_to_copy = (size_t) (hr->used() * yg_surv_rate);
   }
   return bytes_to_copy;
 }
 
-double
-G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
-                                                  bool for_young_gc) const {
+double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
+                                                         bool for_young_gc) const {
   size_t rs_length = hr->rem_set()->occupied();
   size_t card_num;
 
@@ -1328,9 +1457,8 @@
   return region_elapsed_time_ms;
 }
 
-void
-G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
-                                            uint survivor_cset_region_length) {
+void G1CollectorPolicy::init_cset_region_lengths(uint eden_cset_region_length,
+                                                 uint survivor_cset_region_length) {
   _eden_cset_region_length     = eden_cset_region_length;
   _survivor_cset_region_length = survivor_cset_region_length;
   _old_cset_region_length      = 0;
--- a/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorPolicy.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -29,6 +29,7 @@
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1InCSetState.hpp"
 #include "gc/g1/g1MMUTracker.hpp"
+#include "gc/g1/g1Predictions.hpp"
 #include "gc/shared/collectorPolicy.hpp"
 
 // A G1CollectorPolicy makes policy decisions that determine the
@@ -161,7 +162,11 @@
 };
 
 class G1CollectorPolicy: public CollectorPolicy {
-private:
+ private:
+  G1Predictions _predictor;
+
+  double get_new_prediction(TruncatedSeq const* seq) const;
+
   // either equal to the number of parallel threads, if ParallelGCThreads
   // has been set, or 1 otherwise
   int _parallel_gc_threads;
@@ -169,10 +174,6 @@
   // The number of GC threads currently active.
   uintx _no_of_gc_threads;
 
-  enum SomePrivateConstants {
-    NumPrevPausesForHeuristics = 10
-  };
-
   G1MMUTracker* _mmu_tracker;
 
   void initialize_alignments();
@@ -211,7 +212,8 @@
   uint   _reserve_regions;
 
   enum PredictionConstants {
-    TruncatedSeqLength = 10
+    TruncatedSeqLength = 10,
+    NumPrevPausesForHeuristics = 10
   };
 
   TruncatedSeq* _alloc_rate_ms_seq;
@@ -219,6 +221,7 @@
 
   TruncatedSeq* _rs_length_diff_seq;
   TruncatedSeq* _cost_per_card_ms_seq;
+  TruncatedSeq* _cost_scan_hcc_seq;
   TruncatedSeq* _young_cards_per_entry_ratio_seq;
   TruncatedSeq* _mixed_cards_per_entry_ratio_seq;
   TruncatedSeq* _cost_per_entry_ms_seq;
@@ -250,25 +253,9 @@
 
   size_t _recorded_rs_lengths;
   size_t _max_rs_lengths;
-  double _sigma;
 
   size_t _rs_lengths_prediction;
 
-  double sigma() const { return _sigma; }
-
-  // A function that prevents us putting too much stock in small sample
-  // sets.  Returns a number between 2.0 and 1.0, depending on the number
-  // of samples.  5 or more samples yields one; fewer scales linearly from
-  // 2.0 at 1 sample to 1.0 at 5.
-  double confidence_factor(int samples) const {
-    if (samples > 4) return 1.0;
-    else return  1.0 + sigma() * ((double)(5 - samples))/2.0;
-  }
-
-  double get_new_neg_prediction(TruncatedSeq* seq) {
-    return seq->davg() - sigma() * seq->dsd();
-  }
-
 #ifndef PRODUCT
   bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
 #endif // PRODUCT
@@ -285,6 +272,8 @@
   size_t _pending_cards;
 
 public:
+  G1Predictions& predictor() { return _predictor; }
+
   // Accessors
 
   void set_region_eden(HeapRegion* hr, int young_index_in_cset) {
@@ -303,102 +292,41 @@
   bool verify_young_ages();
 #endif // PRODUCT
 
-  double get_new_prediction(TruncatedSeq* seq) const {
-    return MAX2(seq->davg() + sigma() * seq->dsd(),
-                seq->davg() * confidence_factor(seq->num()));
-  }
-
   void record_max_rs_lengths(size_t rs_lengths) {
     _max_rs_lengths = rs_lengths;
   }
 
-  size_t predict_rs_length_diff() const {
-    return (size_t) get_new_prediction(_rs_length_diff_seq);
-  }
+  size_t predict_rs_length_diff() const;
 
-  double predict_alloc_rate_ms() const {
-    return get_new_prediction(_alloc_rate_ms_seq);
-  }
+  double predict_alloc_rate_ms() const;
 
-  double predict_cost_per_card_ms() const {
-    return get_new_prediction(_cost_per_card_ms_seq);
-  }
+  double predict_cost_per_card_ms() const;
 
-  double predict_rs_update_time_ms(size_t pending_cards) const {
-    return (double) pending_cards * predict_cost_per_card_ms();
-  }
+  double predict_scan_hcc_ms() const;
 
-  double predict_young_cards_per_entry_ratio() const {
-    return get_new_prediction(_young_cards_per_entry_ratio_seq);
-  }
+  double predict_rs_update_time_ms(size_t pending_cards) const;
+
+  double predict_young_cards_per_entry_ratio() const;
 
-  double predict_mixed_cards_per_entry_ratio() const {
-    if (_mixed_cards_per_entry_ratio_seq->num() < 2) {
-      return predict_young_cards_per_entry_ratio();
-    } else {
-      return get_new_prediction(_mixed_cards_per_entry_ratio_seq);
-    }
-  }
+  double predict_mixed_cards_per_entry_ratio() const;
 
-  size_t predict_young_card_num(size_t rs_length) const {
-    return (size_t) ((double) rs_length *
-                     predict_young_cards_per_entry_ratio());
-  }
-
-  size_t predict_non_young_card_num(size_t rs_length) const {
-    return (size_t) ((double) rs_length *
-                     predict_mixed_cards_per_entry_ratio());
-  }
+  size_t predict_young_card_num(size_t rs_length) const;
 
-  double predict_rs_scan_time_ms(size_t card_num) const {
-    if (collector_state()->gcs_are_young()) {
-      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
-    } else {
-      return predict_mixed_rs_scan_time_ms(card_num);
-    }
-  }
+  size_t predict_non_young_card_num(size_t rs_length) const;
+
+  double predict_rs_scan_time_ms(size_t card_num) const;
 
-  double predict_mixed_rs_scan_time_ms(size_t card_num) const {
-    if (_mixed_cost_per_entry_ms_seq->num() < 3) {
-      return (double) card_num * get_new_prediction(_cost_per_entry_ms_seq);
-    } else {
-      return (double) (card_num *
-                       get_new_prediction(_mixed_cost_per_entry_ms_seq));
-    }
-  }
+  double predict_mixed_rs_scan_time_ms(size_t card_num) const;
+
+  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const;
 
-  double predict_object_copy_time_ms_during_cm(size_t bytes_to_copy) const {
-    if (_cost_per_byte_ms_during_cm_seq->num() < 3) {
-      return (1.1 * (double) bytes_to_copy) *
-              get_new_prediction(_cost_per_byte_ms_seq);
-    } else {
-      return (double) bytes_to_copy *
-             get_new_prediction(_cost_per_byte_ms_during_cm_seq);
-    }
-  }
+  double predict_object_copy_time_ms(size_t bytes_to_copy) const;
+
+  double predict_constant_other_time_ms() const;
 
-  double predict_object_copy_time_ms(size_t bytes_to_copy) const {
-    if (collector_state()->during_concurrent_mark()) {
-      return predict_object_copy_time_ms_during_cm(bytes_to_copy);
-    } else {
-      return (double) bytes_to_copy *
-              get_new_prediction(_cost_per_byte_ms_seq);
-    }
-  }
+  double predict_young_other_time_ms(size_t young_num) const;
 
-  double predict_constant_other_time_ms() const {
-    return get_new_prediction(_constant_other_time_ms_seq);
-  }
-
-  double predict_young_other_time_ms(size_t young_num) const {
-    return (double) young_num *
-           get_new_prediction(_young_other_cost_per_region_ms_seq);
-  }
-
-  double predict_non_young_other_time_ms(size_t non_young_num) const {
-    return (double) non_young_num *
-           get_new_prediction(_non_young_other_cost_per_region_ms_seq);
-  }
+  double predict_non_young_other_time_ms(size_t non_young_num) const;
 
   double predict_base_elapsed_time_ms(size_t pending_cards) const;
   double predict_base_elapsed_time_ms(size_t pending_cards,
@@ -415,11 +343,15 @@
 
   double predict_survivor_regions_evac_time() const;
 
+  bool should_update_surv_rate_group_predictors() {
+    return collector_state()->last_gc_was_young() && !collector_state()->in_marking_window();
+  }
+
   void cset_regions_freed() {
-    bool propagate = collector_state()->should_propagate();
-    _short_lived_surv_rate_group->all_surviving_words_recorded(propagate);
-    _survivor_surv_rate_group->all_surviving_words_recorded(propagate);
-    // also call it on any more surv rate groups
+    bool update = should_update_surv_rate_group_predictors();
+
+    _short_lived_surv_rate_group->all_surviving_words_recorded(update);
+    _survivor_surv_rate_group->all_surviving_words_recorded(update);
   }
 
   G1MMUTracker* mmu_tracker() {
@@ -434,34 +366,17 @@
     return _mmu_tracker->max_gc_time() * 1000.0;
   }
 
-  double predict_remark_time_ms() const {
-    return get_new_prediction(_concurrent_mark_remark_times_ms);
-  }
+  double predict_remark_time_ms() const;
 
-  double predict_cleanup_time_ms() const {
-    return get_new_prediction(_concurrent_mark_cleanup_times_ms);
-  }
+  double predict_cleanup_time_ms() const;
 
   // Returns an estimate of the survival rate of the region at yg-age
   // "yg_age".
-  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const {
-    TruncatedSeq* seq = surv_rate_group->get_seq(age);
-    if (seq->num() == 0)
-      gclog_or_tty->print("BARF! age is %d", age);
-    guarantee( seq->num() > 0, "invariant" );
-    double pred = get_new_prediction(seq);
-    if (pred > 1.0)
-      pred = 1.0;
-    return pred;
-  }
+  double predict_yg_surv_rate(int age, SurvRateGroup* surv_rate_group) const;
 
-  double predict_yg_surv_rate(int age) const {
-    return predict_yg_surv_rate(age, _short_lived_surv_rate_group);
-  }
+  double predict_yg_surv_rate(int age) const;
 
-  double accum_yg_surv_rate_pred(int age) const {
-    return _short_lived_surv_rate_group->accum_surv_rate_pred(age);
-  }
+  double accum_yg_surv_rate_pred(int age) const;
 
 private:
   // Statistics kept per GC stoppage, pause or full.
@@ -608,8 +523,7 @@
 
   virtual G1CollectorPolicy* as_g1_policy() { return this; }
 
-  const G1CollectorState* collector_state() const;
-  G1CollectorState* collector_state();
+  G1CollectorState* collector_state() const;
 
   G1GCPhaseTimes* phase_times() const { return _phase_times; }
 
@@ -883,15 +797,4 @@
   virtual void post_heap_initialize();
 };
 
-// This should move to some place more general...
-
-// If we have "n" measurements, and we've kept track of their "sum" and the
-// "sum_of_squares" of the measurements, this returns the variance of the
-// sequence.
-inline double variance(int n, double sum_of_squares, double sum) {
-  double n_d = (double)n;
-  double avg = sum/n_d;
-  return (sum_of_squares - 2.0 * avg * sum + n_d * avg * avg) / n_d;
-}
-
 #endif // SHARE_VM_GC_G1_G1COLLECTORPOLICY_HPP
--- a/hotspot/src/share/vm/gc/g1/g1CollectorState.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectorState.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -121,11 +121,7 @@
     return (_in_marking_window && !_in_marking_window_im);
   }
 
-  bool should_propagate() const { // XXX should have a more suitable state name or abstraction for this
-    return (_last_young_gc && !_in_marking_window);
-  }
-
-  G1YCType yc_type() {
+  G1YCType yc_type() const {
     if (during_initial_mark_pause()) {
       return InitialMark;
     } else if (mark_in_progress()) {
--- a/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -56,6 +56,7 @@
   case ErgoCSetConstruction:  return "CSet Construction";
   case ErgoConcCycles:        return "Concurrent Cycles";
   case ErgoMixedGCs:          return "Mixed GCs";
+  case ErgoTiming:            return "Timing";
   default:
     ShouldNotReachHere();
     // Keep the Windows compiler happy
--- a/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ErgoVerbose.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -70,6 +70,7 @@
   ErgoCSetConstruction,
   ErgoConcCycles,
   ErgoMixedGCs,
+  ErgoTiming,
 
   ErgoHeuristicNum
 } ErgoHeuristic;
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
 #include "gc/g1/g1Log.hpp"
@@ -269,6 +270,8 @@
   _gc_par_phases[SATBFiltering] = new WorkerDataArray<double>(max_gc_threads, "SATB Filtering (ms)", true, G1Log::LevelFinest, 3);
 
   _gc_par_phases[UpdateRS] = new WorkerDataArray<double>(max_gc_threads, "Update RS (ms)", true, G1Log::LevelFiner, 2);
+  _gc_par_phases[ScanHCC] = new WorkerDataArray<double>(max_gc_threads, "Scan HCC (ms)", true, G1Log::LevelFiner, 3);
+  _gc_par_phases[ScanHCC]->set_enabled(ConcurrentG1Refine::hot_card_cache_enabled());
   _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms)", true, G1Log::LevelFiner, 2);
   _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms)", true, G1Log::LevelFiner, 2);
   _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms)", true, G1Log::LevelFiner, 2);
--- a/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -56,6 +56,7 @@
     WeakCLDRoots,
     SATBFiltering,
     UpdateRS,
+    ScanHCC,
     ScanRS,
     CodeRoots,
     ObjCopy,
--- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -26,7 +26,6 @@
 #include "gc/g1/dirtyCardQueue.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1HotCardCache.hpp"
-#include "gc/g1/g1RemSet.hpp"
 #include "runtime/atomic.inline.hpp"
 
 G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
@@ -81,9 +80,7 @@
   return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
 }
 
-void G1HotCardCache::drain(uint worker_i,
-                           G1RemSet* g1rs,
-                           DirtyCardQueue* into_cset_dcq) {
+void G1HotCardCache::drain(CardTableEntryClosure* cl, uint worker_i) {
   if (!default_use_cache()) {
     assert(_hot_cache == NULL, "Logic");
     return;
@@ -101,21 +98,8 @@
     for (size_t i = start_idx; i < end_idx; i++) {
       jbyte* card_ptr = _hot_cache[i];
       if (card_ptr != NULL) {
-        if (g1rs->refine_card(card_ptr, worker_i, true)) {
-          // The part of the heap spanned by the card contains references
-          // that point into the current collection set.
-          // We need to record the card pointer in the DirtyCardQueueSet
-          // that we use for such cards.
-          //
-          // The only time we care about recording cards that contain
-          // references that point into the collection set is during
-          // RSet updating while within an evacuation pause.
-          // In this case worker_i should be the id of a GC worker thread
-          assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
-          assert(worker_i < ParallelGCThreads, "incorrect worker id: %u", worker_i);
-
-          into_cset_dcq->enqueue(card_ptr);
-        }
+        bool result = cl->do_card_ptr(card_ptr, worker_i);
+        assert(result, "Closure should always return true");
       } else {
         break;
       }
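The drain loop above relies on a chunked-claim scheme so several workers can empty the cache concurrently. A standalone sketch of that pattern, with std::atomic standing in for HotSpot's Atomic:: primitives and all names illustrative:

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <vector>

    typedef signed char jbyte;

    struct CardClosureModel {
      // Stand-in for CardTableEntryClosure::do_card_ptr().
      virtual bool do_card_ptr(jbyte* card_ptr, unsigned worker_i) = 0;
      virtual ~CardClosureModel() {}
    };

    struct HotCacheModel {
      static const size_t ClaimChunkSize = 32;
      std::vector<jbyte*> _hot_cache;   // cards are appended contiguously
      std::atomic<size_t> _hot_cache_par_claimed_idx;

      HotCacheModel() : _hot_cache_par_claimed_idx(0) {}

      void drain(CardClosureModel* cl, unsigned worker_i) {
        size_t start_idx;
        // Each worker atomically claims a chunk of indices; no further
        // synchronization between workers is needed.
        while ((start_idx = _hot_cache_par_claimed_idx.fetch_add(ClaimChunkSize))
               < _hot_cache.size()) {
          size_t end_idx = std::min(start_idx + ClaimChunkSize, _hot_cache.size());
          for (size_t i = start_idx; i < end_idx; i++) {
            jbyte* card_ptr = _hot_cache[i];
            if (card_ptr == NULL) {
              break;                    // contiguous fill: nothing follows
            }
            cl->do_card_ptr(card_ptr, worker_i);
          }
        }
      }
    };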
--- a/hotspot/src/share/vm/gc/g1/g1HotCardCache.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1HotCardCache.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -32,9 +32,9 @@
 #include "runtime/thread.hpp"
 #include "utilities/globalDefinitions.hpp"
 
+class CardTableEntryClosure;
 class DirtyCardQueue;
 class G1CollectedHeap;
-class G1RemSet;
 class HeapRegion;
 
 // An evicting cache of cards that have been logged by the G1 post
@@ -84,11 +84,11 @@
   // The number of cached cards a thread claims when flushing the cache
   static const int ClaimChunkSize = 32;
 
-  bool default_use_cache() const {
+ public:
+  static bool default_use_cache() {
     return (G1ConcRSLogCacheSize > 0);
   }
 
- public:
   G1HotCardCache(G1CollectedHeap* g1h);
   ~G1HotCardCache();
 
@@ -113,7 +113,7 @@
 
   // Refine the cards that have delayed as a result of
   // being in the cache.
-  void drain(uint worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
+  void drain(CardTableEntryClosure* cl, uint worker_i);
 
   // Set up for parallel processing of the cards in the hot cache
   void reset_hot_cache_claimed_index() {
--- a/hotspot/src/share/vm/gc/g1/g1InCSetState.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1InCSetState.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -51,15 +51,11 @@
   enum {
     // Selection of the values were driven to micro-optimize the encoding and
     // frequency of the checks.
-    // The most common check is whether the region is in the collection set or not.
-    // This encoding allows us to use an != 0 check which in some architectures
-    // (x86*) can be encoded slightly more efficently than a normal comparison
-    // against zero.
-    // The same situation occurs when checking whether the region is humongous
-    // or not, which is encoded by values < 0.
+    // The most common check is whether the region is in the collection set or
+    // not; this encoding allows us to use a single > 0 check for it.
     // The other values are simply encoded in increasing generation order, which
     // makes getting the next generation fast by a simple increment.
-    Humongous    = -1,    // The region is humongous - note that actually any value < 0 would be possible here.
+    Humongous    = -1,    // The region is humongous
     NotInCSet    =  0,    // The region is not in the collection set.
     Young        =  1,    // The region is in the collection set and a young region.
     Old          =  2,    // The region is in the collection set and an old region.
@@ -74,9 +70,10 @@
 
   void set_old()                       { _value = Old; }
 
-  bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
+  bool is_in_cset_or_humongous() const { return is_in_cset() || is_humongous(); }
   bool is_in_cset() const              { return _value > NotInCSet; }
-  bool is_humongous() const            { return _value < NotInCSet; }
+
+  bool is_humongous() const            { return _value == Humongous; }
   bool is_young() const                { return _value == Young; }
   bool is_old() const                  { return _value == Old; }
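A minimal standalone model of this encoding (names illustrative): with humongous at -1, not-in-cset at 0, and the in-cset generations above 0, is_in_cset() reduces to one signed comparison against zero and is_humongous() to an equality test:

    #include <cassert>

    struct InCSetStateModel {
      enum Values { Humongous = -1, NotInCSet = 0, Young = 1, Old = 2 };
      signed char _value;

      bool is_in_cset() const              { return _value > NotInCSet; }
      bool is_humongous() const            { return _value == Humongous; }
      bool is_in_cset_or_humongous() const { return is_in_cset() || is_humongous(); }
      void set_next_gen()                  { _value++; }  // increasing gen order
    };

    int main() {
      InCSetStateModel young = { InCSetStateModel::Young };
      InCSetStateModel huge  = { InCSetStateModel::Humongous };
      assert(young.is_in_cset() && !young.is_humongous());
      assert(!huge.is_in_cset() && huge.is_in_cset_or_humongous());
      young.set_next_gen();             // Young -> Old by simple increment
      assert(young._value == InCSetStateModel::Old);
      return 0;
    }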
 
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -31,31 +31,32 @@
 #include "utilities/stack.inline.hpp"
 
 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state) :
-  G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
-  _cm(_g1->concurrent_mark()) { }
-
-G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1) :
-  G1ParClosureSuper(g1), _scanned_klass(NULL),
-  _cm(_g1->concurrent_mark()) { }
-
-G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1) :
-  _g1(g1), _par_scan_state(NULL), _worker_id(UINT_MAX) { }
+  G1ParClosureSuper(g1, par_scan_state),
+  _worker_id(par_scan_state->worker_id()),
+  _scanned_klass(NULL),
+  _cm(_g1->concurrent_mark())
+{ }
 
 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
-  _g1(g1), _par_scan_state(NULL),
-  _worker_id(UINT_MAX) {
-  set_par_scan_thread_state(par_scan_state);
-}
+  _g1(g1), _par_scan_state(par_scan_state)
+{ }
+
+void G1KlassScanClosure::do_klass(Klass* klass) {
+  // If the klass has not been dirtied we know that there are
+  // no references into the young gen and we can skip it.
+  if (!_process_only_dirty || klass->has_modified_oops()) {
+    // Clean the klass since we're going to scavenge all the metadata.
+    klass->clear_modified_oops();
 
-void G1ParClosureSuper::set_par_scan_thread_state(G1ParScanThreadState* par_scan_state) {
-  assert(_par_scan_state == NULL, "_par_scan_state must only be set once");
-  assert(par_scan_state != NULL, "Must set par_scan_state to non-NULL.");
+    // Tell the closure that this klass is the Klass to scavenge
+    // and is the one to dirty if oops are left pointing into the young gen.
+    _closure->set_scanned_klass(klass);
 
-  _par_scan_state = par_scan_state;
-  _worker_id = par_scan_state->worker_id();
+    klass->oops_do(_closure);
 
-  assert(_worker_id < ParallelGCThreads,
-         "The given worker id %u must be less than the number of threads %u", _worker_id, ParallelGCThreads);
+    _closure->set_scanned_klass(NULL);
+  }
+  _count++;
 }
 
 // Generate G1 specialized oop_oop_iterate functions.
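A standalone model of the dirty-klass filter in do_klass() above may clarify the flag handshake; every type here is an illustrative stand-in:

    #include <cstddef>
    #include <vector>

    struct KlassModel {
      bool modified_oops;            // set by the write barrier on young-gen refs
      std::vector<int*> oop_fields;  // stand-in for the klass's embedded oops
    };

    struct CopyHelperModel {
      KlassModel* _scanned_klass;
      CopyHelperModel() : _scanned_klass(NULL) {}

      void oops_do(KlassModel* k) {
        for (size_t i = 0; i < k->oop_fields.size(); i++) {
          // A real closure would scavenge each oop here and call
          // record_modified_oops() on _scanned_klass if the field still
          // points into the young gen after copying.
        }
      }
    };

    // Mirrors the shape of G1KlassScanClosure::do_klass(): skip clean klasses,
    // clear the flag before scanning so the scan itself can re-dirty the klass.
    void do_klass_model(CopyHelperModel* closure, KlassModel* klass,
                        bool process_only_dirty) {
      if (!process_only_dirty || klass->modified_oops) {
        klass->modified_oops = false;     // clean before scavenging metadata
        closure->_scanned_klass = klass;  // target for possible re-dirtying
        closure->oops_do(klass);
        closure->_scanned_klass = NULL;
      }
    }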
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -52,15 +52,12 @@
 protected:
   G1CollectedHeap* _g1;
   G1ParScanThreadState* _par_scan_state;
-  uint _worker_id;
+
+  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
+  ~G1ParClosureSuper() { }
+
 public:
-  // Initializes the instance, leaving _par_scan_state uninitialized. Must be done
-  // later using the set_par_scan_thread_state() method.
-  G1ParClosureSuper(G1CollectedHeap* g1);
-  G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state);
-  bool apply_to_weak_ref_discovered_field() { return true; }
-
-  void set_par_scan_thread_state(G1ParScanThreadState* par_scan_state);
+  virtual bool apply_to_weak_ref_discovered_field() { return true; }
 };
 
 class G1ParPushHeapRSClosure : public G1ParClosureSuper {
@@ -76,36 +73,41 @@
 
 class G1ParScanClosure : public G1ParClosureSuper {
 public:
-  G1ParScanClosure(G1CollectedHeap* g1) : G1ParClosureSuper(g1) { }
+  G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
+    G1ParClosureSuper(g1, par_scan_state) { }
 
   template <class T> void do_oop_nv(T* p);
   virtual void do_oop(oop* p)          { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p)    { do_oop_nv(p); }
 
-  void set_ref_processor(ReferenceProcessor* ref_processor) { _ref_processor = ref_processor; }
+  void set_ref_processor(ReferenceProcessor* rp) {
+    set_ref_processor_internal(rp);
+  }
 };
 
 // Add back base class for metadata
 class G1ParCopyHelper : public G1ParClosureSuper {
 protected:
+  uint _worker_id;              // Cache value from par_scan_state.
   Klass* _scanned_klass;
   ConcurrentMark* _cm;
 
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that are guaranteed not to move
   // during the GC (i.e., non-CSet objects). It is MT-safe.
-  void mark_object(oop obj);
+  inline void mark_object(oop obj);
 
   // Mark the object if it's not already marked. This is used to mark
   // objects pointed to by roots that have been forwarded during a
   // GC. It is MT-safe.
-  void mark_forwarded_object(oop from_obj, oop to_obj);
- public:
+  inline void mark_forwarded_object(oop from_obj, oop to_obj);
+
   G1ParCopyHelper(G1CollectedHeap* g1,  G1ParScanThreadState* par_scan_state);
-  G1ParCopyHelper(G1CollectedHeap* g1);
+  ~G1ParCopyHelper() { }
 
+ public:
   void set_scanned_klass(Klass* k) { _scanned_klass = k; }
-  template <class T> void do_klass_barrier(T* p, oop new_obj);
+  template <class T> inline void do_klass_barrier(T* p, oop new_obj);
 };
 
 enum G1Barrier {
@@ -127,26 +129,23 @@
 public:
   G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
       G1ParCopyHelper(g1, par_scan_state) {
-    assert(_ref_processor == NULL, "sanity");
-  }
-
-  G1ParCopyClosure(G1CollectedHeap* g1) : G1ParCopyHelper(g1) {
-    assert(_ref_processor == NULL, "sanity");
+    assert(ref_processor() == NULL, "sanity");
   }
 
   template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_nv(p); }
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
-
-  G1CollectedHeap*      g1()  { return _g1; };
-  G1ParScanThreadState* pss() { return _par_scan_state; }
 };
 
-typedef G1ParCopyClosure<G1BarrierNone,  G1MarkNone>             G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone,  G1MarkFromRoot>         G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<G1BarrierNone,  G1MarkPromotedFromRoot> G1ParScanAndMarkWeakExtRootClosure;
-// We use a separate closure to handle references during evacuation
-// failure processing.
+class G1KlassScanClosure : public KlassClosure {
+  G1ParCopyHelper* _closure;
+  bool             _process_only_dirty;
+  int              _count;
+ public:
+  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
+      : _closure(closure), _process_only_dirty(process_only_dirty), _count(0) {}
+  void do_klass(Klass* klass);
+};
 
 class FilterIntoCSClosure: public ExtendedOopClosure {
   G1CollectedHeap* _g1;
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.inline.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -91,7 +91,7 @@
       if (state.is_humongous()) {
         _g1->set_humongous_is_live(obj);
       }
-      _par_scan_state->update_rs(_from, p, _worker_id);
+      _par_scan_state->update_rs(_from, p);
     }
   }
 }
@@ -225,4 +225,78 @@
   }
 }
 
+template <class T>
+void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
+  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
+    _scanned_klass->record_modified_oops();
+  }
+}
+
+void G1ParCopyHelper::mark_object(oop obj) {
+  assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
+
+  // We know that the object is not moving so it's safe to read its size.
+  _cm->grayRoot(obj, (size_t) obj->size(), _worker_id);
+}
+
+void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
+  assert(from_obj->is_forwarded(), "from obj should be forwarded");
+  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
+  assert(from_obj != to_obj, "should not be self-forwarded");
+
+  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
+  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");
+
+  // The object might be in the process of being copied by another
+  // worker, so we cannot trust that its to-space image is
+  // well-formed. We therefore read its size from its from-space
+  // image, which we know is not changing.
+  _cm->grayRoot(to_obj, (size_t) from_obj->size(), _worker_id);
+}
+
+template <G1Barrier barrier, G1Mark do_mark_object>
+template <class T>
+void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+
+  if (oopDesc::is_null(heap_oop)) {
+    return;
+  }
+
+  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+
+  assert(_worker_id == _par_scan_state->worker_id(), "sanity");
+
+  const InCSetState state = _g1->in_cset_state(obj);
+  if (state.is_in_cset()) {
+    oop forwardee;
+    markOop m = obj->mark();
+    if (m->is_marked()) {
+      forwardee = (oop) m->decode_pointer();
+    } else {
+      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
+    }
+    assert(forwardee != NULL, "forwardee should not be NULL");
+    oopDesc::encode_store_heap_oop(p, forwardee);
+    if (do_mark_object != G1MarkNone && forwardee != obj) {
+      // If the object is self-forwarded we don't need to explicitly
+      // mark it, the evacuation failure protocol will do so.
+      mark_forwarded_object(obj, forwardee);
+    }
+
+    if (barrier == G1BarrierKlass) {
+      do_klass_barrier(p, forwardee);
+    }
+  } else {
+    if (state.is_humongous()) {
+      _g1->set_humongous_is_live(obj);
+    }
+    // The object is not in the collection set. If we are a root scanning
+    // closure during an initial mark pause then attempt to mark the object.
+    if (do_mark_object == G1MarkFromRoot) {
+      mark_object(obj);
+    }
+  }
+}
+
 #endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -27,6 +27,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/g1ParScanThreadState.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -37,15 +38,14 @@
     _refs(g1h->task_queue(worker_id)),
     _dcq(&g1h->dirty_card_queue_set()),
     _ct_bs(g1h->g1_barrier_set()),
-    _g1_rem(g1h->g1_rem_set()),
+    _closures(NULL),
     _hash_seed(17),
     _worker_id(worker_id),
     _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false),
-    _scanner(g1h),
+    _scanner(g1h, this),
     _old_gen_is_full(false)
 {
-  _scanner.set_par_scan_thread_state(this);
   // we allocate G1YoungSurvRateNumRegions plus one entries, since
   // we "sacrifice" entry 0 to keep track of surviving bytes for
   // non-young regions (where the age is -1)
@@ -69,6 +69,8 @@
   // need to be moved to the next space.
   _dest[InCSetState::Young]        = InCSetState::Old;
   _dest[InCSetState::Old]          = InCSetState::Old;
+
+  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
 }
 
 // Pass locally gathered statistics to global state.
@@ -86,6 +88,7 @@
 
 G1ParScanThreadState::~G1ParScanThreadState() {
   delete _plab_allocator;
+  delete _closures;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
 }
 
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -36,6 +36,7 @@
 #include "oops/oop.hpp"
 
 class G1PLABAllocator;
+class G1EvacuationRootClosures;
 class HeapRegion;
 class outputStream;
 
@@ -45,7 +46,7 @@
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
   G1SATBCardTableModRefBS* _ct_bs;
-  G1RemSet*         _g1_rem;
+  G1EvacuationRootClosures* _closures;
 
   G1PLABAllocator*  _plab_allocator;
 
@@ -97,7 +98,7 @@
 
   template <class T> void push_on_queue(T* ref);
 
-  template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
+  template <class T> void update_rs(HeapRegion* from, T* p) {
     // If the new value of the field points to the same region or
     // is the to-space, we don't need to include it in the Rset updates.
     if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
@@ -109,6 +110,7 @@
     }
   }
 
+  G1EvacuationRootClosures* closures() { return _closures; }
   uint worker_id() { return _worker_id; }
 
   // Returns the current amount of waste due to alignment or not being able to fit
--- a/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1ParScanThreadState.inline.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -56,7 +56,7 @@
   }
 
   assert(obj != NULL, "Must be");
-  update_rs(from, p, _worker_id);
+  update_rs(from, p);
 }
 
 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1Predictions.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1Predictions.hpp"
+
+#ifndef PRODUCT
+
+void G1Predictions::test() {
+  double const epsilon = 1e-6;
+  {
+    // Some basic formula tests with confidence = 0.0
+    G1Predictions predictor(0.0);
+    TruncatedSeq s;
+
+    double p0 = predictor.get_new_prediction(&s);
+    assert(p0 < epsilon, "Initial prediction of empty sequence must be 0.0 but is %f", p0);
+
+    s.add(5.0);
+    double p1 = predictor.get_new_prediction(&s);
+    assert(fabs(p1 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p1);
+    for (int i = 0; i < 40; i++) {
+      s.add(5.0);
+    }
+    double p2 = predictor.get_new_prediction(&s);
+    assert(fabs(p2 - 5.0) < epsilon, "Prediction should be 5.0 but is %f", p2);
+  }
+
+  {
+    // The following test checks that the initial predictions are based on the
+    // average of the sequence and not on the stddev (which is 0).
+    G1Predictions predictor(0.5);
+    TruncatedSeq s;
+
+    s.add(1.0);
+    double p1 = predictor.get_new_prediction(&s);
+    assert(p1 > 1.0, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
+    s.add(1.0);
+    double p2 = predictor.get_new_prediction(&s);
+    assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
+    s.add(1.0);
+    double p3 = predictor.get_new_prediction(&s);
+    assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
+    s.add(1.0);
+    s.add(1.0); // Five elements are now in the sequence.
+    double p5 = predictor.get_new_prediction(&s);
+    assert(p5 < p3, "Fifth prediction must be smaller than third, but they are %f %f", p3, p5);
+    assert(fabs(p5 - 1.0) < epsilon, "Prediction must be within epsilon of 1.0 but is %f", p5);
+  }
+
+  {
+    // The following test checks that initially the prediction is based on the
+    // average, and is later overridden by the stddev-based prediction.
+    G1Predictions predictor(0.5);
+    TruncatedSeq s;
+
+    s.add(0.5);
+    double p1 = predictor.get_new_prediction(&s);
+    assert(p1 > 0.5, "First prediction must be larger than average, but avg is %f and prediction %f", s.davg(), p1);
+    s.add(0.2);
+    double p2 = predictor.get_new_prediction(&s);
+    assert(p2 < p1, "First prediction must be larger than second, but they are %f %f", p1, p2);
+    s.add(0.5);
+    double p3 = predictor.get_new_prediction(&s);
+    assert(p3 < p2, "Second prediction must be larger than third, but they are %f %f", p2, p3);
+    s.add(0.2);
+    s.add(2.0);
+    double p5 = predictor.get_new_prediction(&s);
+    assert(p5 > p3, "Fifth prediction must be bigger than third, but they are %f %f", p3, p5);
+  }
+}
+
+void TestPredictions_test() {
+  G1Predictions::test();
+}
+
+#endif
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1Predictions.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1PREDICTIONS_HPP
+#define SHARE_VM_GC_G1_G1PREDICTIONS_HPP
+
+#include "memory/allocation.inline.hpp"
+#include "utilities/numberSeq.hpp"
+
+// Utility class containing various helper methods for prediction.
+class G1Predictions VALUE_OBJ_CLASS_SPEC {
+ private:
+  double _sigma;
+
+  // This function is used to estimate the stddev of sample sets. There is some
+  // special consideration of small sample sets: the actual stddev for them is
+  // not very useful, so we floor the estimate at a value derived from the
+  // sample average. With five or more samples that floor is zero and the real
+  // stddev is used; with fewer, the floor scales linearly from 2.0 times the
+  // average at one sample down to 0.5 times the average at four.
+  double stddev_estimate(TruncatedSeq const* seq) const {
+    double estimate = seq->dsd();
+    int const samples = seq->num();
+    if (samples < 5) {
+      estimate = MAX2(seq->davg() * (5 - samples) / 2.0, estimate);
+    }
+    return estimate;
+  }
+ public:
+  G1Predictions(double sigma) : _sigma(sigma) {
+    assert(sigma >= 0.0, "Confidence must be larger than or equal to zero");
+  }
+
+  // Confidence factor.
+  double sigma() const { return _sigma; }
+
+  double get_new_prediction(TruncatedSeq const* seq) const {
+    return seq->davg() + _sigma * stddev_estimate(seq);
+  }
+
+#ifndef PRODUCT
+  static void test();
+#endif
+};
+
+#endif // SHARE_VM_GC_G1_G1PREDICTIONS_HPP
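To see how get_new_prediction() behaves, here is a self-contained sketch of the same math; it substitutes a plain average for TruncatedSeq's decaying average, so it is a simplification, not the HotSpot code:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    static double avg(const std::vector<double>& s) {
      return std::accumulate(s.begin(), s.end(), 0.0) / s.size();
    }

    static double stddev(const std::vector<double>& s) {
      double a = avg(s);
      double sq = 0.0;
      for (std::size_t i = 0; i < s.size(); i++) {
        sq += (s[i] - a) * (s[i] - a);
      }
      return std::sqrt(sq / s.size());
    }

    // Mirrors stddev_estimate(): below five samples the estimate is floored
    // at avg * (5 - n) / 2.0, i.e. 2.0x the average at one sample down to
    // 0.5x at four; from five samples on the real stddev is used.
    static double stddev_estimate(const std::vector<double>& s) {
      double estimate = stddev(s);
      int n = (int)s.size();
      if (n < 5) {
        estimate = std::max(avg(s) * (5 - n) / 2.0, estimate);
      }
      return estimate;
    }

    static double predict(const std::vector<double>& s, double sigma) {
      return avg(s) + sigma * stddev_estimate(s);
    }

    int main() {
      std::vector<double> s;
      for (int n = 1; n <= 6; n++) {
        s.push_back(1.0);   // constant input, as in the unit test above
        std::printf("n=%d prediction=%.3f\n", n, predict(s, 0.5));
      }
      return 0;
    }

For a constant all-1.0 sequence (where the decaying and plain averages coincide) and sigma = 0.5, this prints 2.000, 1.750, 1.500, 1.250, 1.000, 1.000: the small-sample floor shrinks with each sample and vanishes once five are present, matching the shape the unit test asserts.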
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -26,7 +26,6 @@
 #include "gc/g1/concurrentG1Refine.hpp"
 #include "gc/g1/concurrentG1RefineThread.hpp"
 #include "gc/g1/g1BlockOffsetTable.inline.hpp"
-#include "gc/g1/g1CodeBlobClosure.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
@@ -228,15 +227,13 @@
 };
 
 size_t G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
-                        OopClosure* non_heap_roots,
+                        CodeBlobClosure* heap_region_codeblobs,
                         uint worker_i) {
   double rs_time_start = os::elapsedTime();
 
-  G1CodeBlobClosure code_root_cl(non_heap_roots);
-
   HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
 
-  ScanRSClosure scanRScl(oc, &code_root_cl, worker_i);
+  ScanRSClosure scanRScl(oc, heap_region_codeblobs, worker_i);
 
   _g1->collection_set_iterate_from(startRegion, &scanRScl);
   scanRScl.set_try_claimed();
@@ -263,6 +260,7 @@
                                               DirtyCardQueue* into_cset_dcq) :
     _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq)
   {}
+
   bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
     // The only time we care about recording cards that
     // contain references that point into the collection set
@@ -285,11 +283,16 @@
 };
 
 void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i) {
-  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
-  // Apply the given closure to all remaining log entries.
   RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
 
-  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
+  G1GCParPhaseTimesTracker x(_g1p->phase_times(), G1GCPhaseTimes::UpdateRS, worker_i);
+  {
+    // Apply the closure to the entries of the hot card cache.
+    G1GCParPhaseTimesTracker y(_g1p->phase_times(), G1GCPhaseTimes::ScanHCC, worker_i);
+    _g1->iterate_hcc_closure(&into_cset_update_rs_cl, worker_i);
+  }
+  // Apply the closure to all remaining log entries.
+  _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, worker_i);
 }
 
 void G1RemSet::cleanupHRRS() {
@@ -297,7 +300,7 @@
 }
 
 size_t G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
-                                             OopClosure* non_heap_roots,
+                                             CodeBlobClosure* heap_region_codeblobs,
                                              uint worker_i) {
 #if CARD_REPEAT_HISTO
   ct_freq_update_histo_and_reset();
@@ -320,7 +323,7 @@
   DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set());
 
   updateRS(&into_cset_dcq, worker_i);
-  size_t cards_scanned = scanRS(oc, non_heap_roots, worker_i);
+  size_t cards_scanned = scanRS(oc, heap_region_codeblobs, worker_i);
 
   // We now clear the cached values of _cset_rs_update_cl for this worker
   _cset_rs_update_cl[worker_i] = NULL;
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -95,7 +95,7 @@
   // Returns the number of cards scanned while looking for pointers
   // into the collection set.
   size_t oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
-                                     OopClosure* non_heap_roots,
+                                     CodeBlobClosure* heap_region_codeblobs,
                                      uint worker_i);
 
   // Prepare for and cleanup after an oops_into_collection_set_do
@@ -107,7 +107,7 @@
   void cleanup_after_oops_into_collection_set_do();
 
   size_t scanRS(G1ParPushHeapRSClosure* oc,
-                OopClosure* non_heap_roots,
+                CodeBlobClosure* heap_region_codeblobs,
                 uint worker_i);
 
   void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RootClosures.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/g1/bufferingOopClosure.hpp"
+#include "gc/g1/g1CodeBlobClosure.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1OopClosures.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
+
+class G1ParScanThreadState;
+
+// Simple holder object for a complete set of closures used by the G1 evacuation code.
+template <G1Mark Mark>
+class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
+public:
+  G1ParCopyClosure<G1BarrierNone,  Mark> _oops;
+  G1ParCopyClosure<G1BarrierKlass, Mark> _oop_in_klass;
+  G1KlassScanClosure                     _klass_in_cld_closure;
+  CLDToKlassAndOopClosure                _clds;
+  G1CodeBlobClosure                      _codeblobs;
+  BufferingOopClosure                    _buffered_oops;
+
+  G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
+    _oops(g1h, pss),
+    _oop_in_klass(g1h, pss),
+    _klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
+    _clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
+    _codeblobs(&_oops),
+    _buffered_oops(&_oops) {}
+};
+
+class G1EvacuationClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkNone> _closures;
+
+public:
+  G1EvacuationClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss,
+                       bool gcs_are_young) :
+      _closures(g1h, pss, gcs_are_young, /* must_claim_cld */ false) {}
+
+  OopClosure* weak_oops()   { return &_closures._buffered_oops; }
+  OopClosure* strong_oops() { return &_closures._buffered_oops; }
+
+  CLDClosure* weak_clds()             { return &_closures._clds; }
+  CLDClosure* strong_clds()           { return &_closures._clds; }
+  CLDClosure* thread_root_clds()      { return NULL; }
+  CLDClosure* second_pass_weak_clds() { return NULL; }
+
+  CodeBlobClosure* strong_codeblobs()      { return &_closures._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()        { return &_closures._codeblobs; }
+
+  void flush()                 { _closures._buffered_oops.done(); }
+  double closure_app_seconds() { return _closures._buffered_oops.closure_app_seconds(); }
+
+  OopClosure* raw_strong_oops() { return &_closures._oops; }
+
+  bool trace_metadata()         { return false; }
+};
+
+// Closures used during initial mark.
+// The treatment of "weak" roots is selectable through the template parameter,
+// this is usually used to control unloading of classes and interned strings.
+template <G1Mark MarkWeak>
+class G1InitalMarkClosures : public G1EvacuationRootClosures {
+  G1SharedClosures<G1MarkFromRoot> _strong;
+  G1SharedClosures<MarkWeak>       _weak;
+
+  // Filter method to help with returning the appropriate closures
+  // depending on the class template parameter.
+  template <G1Mark Mark, typename T>
+  T* null_if(T* t) {
+    if (Mark == MarkWeak) {
+      return NULL;
+    }
+    return t;
+  }
+
+public:
+  G1InitalMarkClosures(G1CollectedHeap* g1h,
+                       G1ParScanThreadState* pss) :
+      _strong(g1h, pss, /* process_only_dirty_klasses */ false, /* must_claim_cld */ true),
+      _weak(g1h, pss,   /* process_only_dirty_klasses */ false, /* must_claim_cld */ true) {}
+
+  OopClosure* weak_oops()   { return &_weak._buffered_oops; }
+  OopClosure* strong_oops() { return &_strong._buffered_oops; }
+
+  // If MarkWeak is G1MarkPromotedFromRoot then the weak CLDs must be processed in a second pass.
+  CLDClosure* weak_clds()             { return null_if<G1MarkPromotedFromRoot>(&_weak._clds); }
+  CLDClosure* strong_clds()           { return &_strong._clds; }
+
+  // If MarkWeak is G1MarkFromRoot then all CLDs are processed by the weak and
+  // strong variants, so return a NULL closure for the following specialized
+  // versions in that case.
+  CLDClosure* thread_root_clds()      { return null_if<G1MarkFromRoot>(&_strong._clds); }
+  CLDClosure* second_pass_weak_clds() { return null_if<G1MarkFromRoot>(&_weak._clds); }
+
+  CodeBlobClosure* strong_codeblobs()      { return &_strong._codeblobs; }
+  CodeBlobClosure* weak_codeblobs()        { return &_weak._codeblobs; }
+
+  void flush() {
+    _strong._buffered_oops.done();
+    _weak._buffered_oops.done();
+  }
+
+  double closure_app_seconds() {
+    return _strong._buffered_oops.closure_app_seconds() +
+           _weak._buffered_oops.closure_app_seconds();
+  }
+
+  OopClosure* raw_strong_oops() { return &_strong._oops; }
+
+  // If we are not marking all weak roots then we are tracing
+  // which metadata is alive.
+  bool trace_metadata()         { return MarkWeak == G1MarkPromotedFromRoot; }
+};
+
+G1EvacuationRootClosures* G1EvacuationRootClosures::create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h) {
+  if (g1h->collector_state()->during_initial_mark_pause()) {
+    if (ClassUnloadingWithConcurrentMark) {
+      return new G1InitalMarkClosures<G1MarkPromotedFromRoot>(g1h, pss);
+    } else {
+      return new G1InitalMarkClosures<G1MarkFromRoot>(g1h, pss);
+    }
+  } else {
+    return new G1EvacuationClosures(g1h, pss, g1h->collector_state()->gcs_are_young());
+  }
+}
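A standalone sketch of the null_if filtering that the factory above relies on; the enum and names are illustrative stand-ins for the G1Mark template machinery:

    #include <cstddef>
    #include <cstdio>

    enum MarkKind { MarkNone, MarkFromRoot, MarkPromotedFromRoot };

    template <MarkKind MarkWeak>
    struct InitialMarkClosuresModel {
      // Returns t only when this instantiation's MarkWeak differs from the
      // queried Mark, otherwise NULL: the accessor is selected at compile time.
      template <MarkKind Mark, typename T>
      static T* null_if(T* t) {
        return (Mark == MarkWeak) ? NULL : t;
      }
    };

    int main() {
      int weak_clds = 0;

      // With class unloading (MarkWeak == MarkPromotedFromRoot) the weak CLDs
      // are deferred: a weak_clds()-style accessor yields NULL in the first pass.
      int* deferred = InitialMarkClosuresModel<MarkPromotedFromRoot>::
          null_if<MarkPromotedFromRoot>(&weak_clds);

      // Without class unloading (MarkWeak == MarkFromRoot) the same query
      // returns the closure, and no second pass is needed.
      int* direct = InitialMarkClosuresModel<MarkFromRoot>::
          null_if<MarkPromotedFromRoot>(&weak_clds);

      std::printf("deferred=%p direct=%p\n", (void*)deferred, (void*)direct);
      return 0;
    }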
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/g1RootClosures.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1ROOTCLOSURESET_HPP
+#define SHARE_VM_GC_G1_G1ROOTCLOSURESET_HPP
+
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+
+class G1CollectedHeap;
+class G1ParScanThreadState;
+
+class G1RootClosures : public CHeapObj<mtGC> {
+public:
+  // Closures to process raw oops in the root set.
+  virtual OopClosure* weak_oops() = 0;
+  virtual OopClosure* strong_oops() = 0;
+
+  // Closures to process CLDs in the root set.
+  virtual CLDClosure* weak_clds() = 0;
+  virtual CLDClosure* strong_clds() = 0;
+
+  // Applied to the CLDs reachable from the thread stacks.
+  virtual CLDClosure* thread_root_clds() = 0;
+
+  // Applied to code blobs reachable as strong roots.
+  virtual CodeBlobClosure* strong_codeblobs() = 0;
+};
+
+class G1EvacuationRootClosures : public G1RootClosures {
+public:
+  // Flush any buffered state and deferred processing
+  virtual void flush() = 0;
+  virtual double closure_app_seconds() = 0;
+
+  // Applied to the weakly reachable CLDs when all strongly reachable
+  // CLDs are guaranteed to have been processed.
+  virtual CLDClosure* second_pass_weak_clds() = 0;
+
+  // Get a raw oop closure for processing oops, bypassing the flushing above.
+  virtual OopClosure* raw_strong_oops() = 0;
+
+  // Applied to code blobs treated as weak roots.
+  virtual CodeBlobClosure* weak_codeblobs() = 0;
+
+  // Is this closure used for tracing metadata?
+  virtual bool trace_metadata() = 0;
+
+  static G1EvacuationRootClosures* create_root_closures(G1ParScanThreadState* pss, G1CollectedHeap* g1h);
+};
+
+#endif // SHARE_VM_GC_G1_G1ROOTCLOSURESET_HPP
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -33,7 +33,7 @@
 #include "gc/g1/g1CollectorPolicy.hpp"
 #include "gc/g1/g1CollectorState.hpp"
 #include "gc/g1/g1GCPhaseTimes.hpp"
-#include "gc/g1/g1RemSet.inline.hpp"
+#include "gc/g1/g1RootClosures.hpp"
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "memory/allocation.inline.hpp"
@@ -70,40 +70,19 @@
     _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never),
     _n_workers_discovered_strong_classes(0) {}
 
-void G1RootProcessor::evacuate_roots(OopClosure* scan_non_heap_roots,
-                                     OopClosure* scan_non_heap_weak_roots,
-                                     CLDClosure* scan_strong_clds,
-                                     CLDClosure* scan_weak_clds,
-                                     bool trace_metadata,
-                                     uint worker_i) {
-  // First scan the shared roots.
+void G1RootProcessor::evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i) {
   double ext_roots_start = os::elapsedTime();
   G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
 
-  BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
-  BufferingOopClosure buf_scan_non_heap_weak_roots(scan_non_heap_weak_roots);
-
-  OopClosure* const weak_roots = &buf_scan_non_heap_weak_roots;
-  OopClosure* const strong_roots = &buf_scan_non_heap_roots;
-
-  // CodeBlobClosures are not interoperable with BufferingOopClosures
-  G1CodeBlobClosure root_code_blobs(scan_non_heap_roots);
-
-  process_java_roots(strong_roots,
-                     trace_metadata ? scan_strong_clds : NULL,
-                     scan_strong_clds,
-                     trace_metadata ? NULL : scan_weak_clds,
-                     &root_code_blobs,
-                     phase_times,
-                     worker_i);
+  process_java_roots(closures, phase_times, worker_i);
 
   // This is the point where this worker thread will not find more strong CLDs/nmethods.
   // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
-  if (trace_metadata) {
+  if (closures->trace_metadata()) {
     worker_has_discovered_all_strong_classes();
   }
 
-  process_vm_roots(strong_roots, weak_roots, phase_times, worker_i);
+  process_vm_roots(closures, phase_times, worker_i);
 
   {
     // Now the CM ref_processor roots.
@@ -113,11 +92,11 @@
       // concurrent mark ref processor as roots and keep entries
       // (which are added by the marking threads) on them live
       // until they can be processed at the end of marking.
-      _g1h->ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
+      _g1h->ref_processor_cm()->weak_oops_do(closures->strong_oops());
     }
   }
 
-  if (trace_metadata) {
+  if (closures->trace_metadata()) {
     {
       G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WaitForStrongCLD, worker_i);
       // Barrier to make sure all workers passed
@@ -127,18 +106,18 @@
 
     // Now take the complement of the strong CLDs.
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::WeakCLDRoots, worker_i);
-    ClassLoaderDataGraph::roots_cld_do(NULL, scan_weak_clds);
+    assert(closures->second_pass_weak_clds() != NULL, "Should be non-null if we are tracing metadata.");
+    ClassLoaderDataGraph::roots_cld_do(NULL, closures->second_pass_weak_clds());
   } else {
     phase_times->record_time_secs(G1GCPhaseTimes::WaitForStrongCLD, worker_i, 0.0);
     phase_times->record_time_secs(G1GCPhaseTimes::WeakCLDRoots, worker_i, 0.0);
+    assert(closures->second_pass_weak_clds() == NULL, "Should be null if not tracing metadata.");
   }
 
   // Finish up any enqueued closure apps (attributed as object copy time).
-  buf_scan_non_heap_roots.done();
-  buf_scan_non_heap_weak_roots.done();
+  closures->flush();
 
-  double obj_copy_time_sec = buf_scan_non_heap_roots.closure_app_seconds()
-      + buf_scan_non_heap_weak_roots.closure_app_seconds();
+  double obj_copy_time_sec = closures->closure_app_seconds();
 
   phase_times->record_time_secs(G1GCPhaseTimes::ObjCopy, worker_i, obj_copy_time_sec);
 
@@ -159,22 +138,68 @@
   _process_strong_tasks.all_tasks_completed(n_workers());
 }
 
+// Adaptor to pass the closures to the strong roots in the VM.
+class StrongRootsClosures : public G1RootClosures {
+  OopClosure* _roots;
+  CLDClosure* _clds;
+  CodeBlobClosure* _blobs;
+public:
+  StrongRootsClosures(OopClosure* roots, CLDClosure* clds, CodeBlobClosure* blobs) :
+      _roots(roots), _clds(clds), _blobs(blobs) {}
+
+  OopClosure* weak_oops()   { return NULL; }
+  OopClosure* strong_oops() { return _roots; }
+
+  CLDClosure* weak_clds()        { return NULL; }
+  CLDClosure* strong_clds()      { return _clds; }
+  CLDClosure* thread_root_clds() { return _clds; }
+
+  CodeBlobClosure* strong_codeblobs() { return _blobs; }
+};
+
 void G1RootProcessor::process_strong_roots(OopClosure* oops,
                                            CLDClosure* clds,
                                            CodeBlobClosure* blobs) {
+  StrongRootsClosures closures(oops, clds, blobs);
 
-  process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
-  process_vm_roots(oops, NULL, NULL, 0);
+  process_java_roots(&closures, NULL, 0);
+  process_vm_roots(&closures, NULL, 0);
 
   _process_strong_tasks.all_tasks_completed(n_workers());
 }
 
+// Adaptor to pass the closures to all the roots in the VM.
+class AllRootsClosures : public G1RootClosures {
+  OopClosure* _roots;
+  CLDClosure* _clds;
+public:
+  AllRootsClosures(OopClosure* roots, CLDClosure* clds) :
+      _roots(roots), _clds(clds) {}
+
+  OopClosure* weak_oops() { return _roots; }
+  OopClosure* strong_oops() { return _roots; }
+
+  // By returning the same CLDClosure for both weak and strong CLDs we ensure
+  // that a single walk of the CLDG will invoke the closure on all CLDs in the
+  // system.
+  CLDClosure* weak_clds() { return _clds; }
+  CLDClosure* strong_clds() { return _clds; }
+  // We don't want to visit CLDs more than once, so we return NULL for the
+  // thread root CLDs.
+  CLDClosure* thread_root_clds() { return NULL; }
+
+  // We don't want to visit code blobs more than once, so we return NULL for the
+  // strong case and walk the entire code cache as a separate step.
+  CodeBlobClosure* strong_codeblobs() { return NULL; }
+};
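For orientation while reading the two adaptors above: a minimal sketch of the G1RootClosures interface they implement. The method set is inferred from the call sites in this patch, not copied from g1RootClosures.hpp (which is not part of this hunk), and the evacuation-only methods are assumed to sit on G1EvacuationRootClosures:

    // Sketch only -- inferred from the calls G1RootProcessor makes above.
    class G1RootClosures : public CHeapObj<mtGC> {
    public:
      // Closures applied to the weakly/strongly reachable VM roots.
      virtual OopClosure* weak_oops() = 0;
      virtual OopClosure* strong_oops() = 0;

      // Closures applied to CLDs, and to CLDs reached via thread stacks.
      virtual CLDClosure* weak_clds() = 0;
      virtual CLDClosure* strong_clds() = 0;
      virtual CLDClosure* thread_root_clds() = 0;

      // Closure applied to code blobs treated as strong roots.
      virtual CodeBlobClosure* strong_codeblobs() = 0;
    };

    // Assumed evacuation extension used by evacuate_roots() above.
    class G1EvacuationRootClosures : public G1RootClosures {
    public:
      virtual bool trace_metadata() = 0;
      virtual CLDClosure* second_pass_weak_clds() = 0;
      virtual void flush() = 0;
      virtual double closure_app_seconds() = 0;
    };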
+
 void G1RootProcessor::process_all_roots(OopClosure* oops,
                                         CLDClosure* clds,
                                         CodeBlobClosure* blobs) {
+  AllRootsClosures closures(oops, clds);
 
-  process_java_roots(oops, NULL, clds, clds, NULL, NULL, 0);
-  process_vm_roots(oops, oops, NULL, 0);
+  process_java_roots(&closures, NULL, 0);
+  process_vm_roots(&closures, NULL, 0);
 
   if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(blobs);
@@ -183,35 +208,36 @@
   _process_strong_tasks.all_tasks_completed(n_workers());
 }
 
-void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
-                                         CLDClosure* thread_stack_clds,
-                                         CLDClosure* strong_clds,
-                                         CLDClosure* weak_clds,
-                                         CodeBlobClosure* strong_code,
+void G1RootProcessor::process_java_roots(G1RootClosures* closures,
                                          G1GCPhaseTimes* phase_times,
                                          uint worker_i) {
-  assert(thread_stack_clds == NULL || weak_clds == NULL, "There is overlap between those, only one may be set");
+  assert(closures->thread_root_clds() == NULL || closures->weak_clds() == NULL, "There is overlap between these closures; only one may be set");
   // Iterating over the CLDG and the Threads is done early to allow us to
   // first process the strong CLDs and nmethods and then, after a barrier,
   // let the thread process the weak CLDs and nmethods.
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
     if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
-      ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
+      ClassLoaderDataGraph::roots_cld_do(closures->strong_clds(), closures->weak_clds());
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ThreadRoots, worker_i);
     bool is_par = n_workers() > 1;
-    Threads::possibly_parallel_oops_do(is_par, strong_roots, thread_stack_clds, strong_code);
+    Threads::possibly_parallel_oops_do(is_par,
+                                       closures->strong_oops(),
+                                       closures->thread_root_clds(),
+                                       closures->strong_codeblobs());
   }
 }
 
-void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
-                                       OopClosure* weak_roots,
+void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
                                        G1GCPhaseTimes* phase_times,
                                        uint worker_i) {
+  OopClosure* strong_roots = closures->strong_oops();
+  OopClosure* weak_roots = closures->weak_oops();
+
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
     if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
--- a/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1RootProcessor.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -32,8 +32,10 @@
 class CLDClosure;
 class CodeBlobClosure;
 class G1CollectedHeap;
+class G1EvacuationRootClosures;
 class G1GCPhaseTimes;
 class G1ParPushHeapRSClosure;
+class G1RootClosures;
 class Monitor;
 class OopClosure;
 class SubTasksDone;
@@ -71,16 +73,11 @@
   void worker_has_discovered_all_strong_classes();
   void wait_until_all_strong_classes_discovered();
 
-  void process_java_roots(OopClosure* scan_non_heap_roots,
-                          CLDClosure* thread_stack_clds,
-                          CLDClosure* scan_strong_clds,
-                          CLDClosure* scan_weak_clds,
-                          CodeBlobClosure* scan_strong_code,
+  void process_java_roots(G1RootClosures* closures,
                           G1GCPhaseTimes* phase_times,
                           uint worker_i);
 
-  void process_vm_roots(OopClosure* scan_non_heap_roots,
-                        OopClosure* scan_non_heap_weak_roots,
+  void process_vm_roots(G1RootClosures* closures,
                         G1GCPhaseTimes* phase_times,
                         uint worker_i);
 
@@ -90,12 +87,7 @@
   // Apply closures to the strongly and weakly reachable roots in the system
   // in a single pass.
   // Record and report timing measurements for sub phases using the worker_i
-  void evacuate_roots(OopClosure* scan_non_heap_roots,
-                      OopClosure* scan_non_heap_weak_roots,
-                      CLDClosure* scan_strong_clds,
-                      CLDClosure* scan_weak_clds,
-                      bool trace_metadata,
-                      uint worker_i);
+  void evacuate_roots(G1EvacuationRootClosures* closures, uint worker_i);
 
   // Apply oops, clds and blobs to all strongly reachable roots in the system
   void process_strong_roots(OopClosure* oops,
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -31,7 +31,6 @@
 #include "gc/g1/survRateGroup.hpp"
 #include "gc/shared/ageTable.hpp"
 #include "gc/shared/spaceDecorator.hpp"
-#include "gc/shared/watermark.hpp"
 #include "utilities/macros.hpp"
 
 // A HeapRegion is the smallest piece of a G1CollectedHeap that
--- a/hotspot/src/share/vm/gc/g1/survRateGroup.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/survRateGroup.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -24,15 +24,15 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/g1Predictions.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/survRateGroup.hpp"
 #include "memory/allocation.hpp"
 
-SurvRateGroup::SurvRateGroup(G1CollectorPolicy* g1p,
+SurvRateGroup::SurvRateGroup(G1Predictions* predictor,
                              const char* name,
                              size_t summary_surv_rates_len) :
-    _g1p(g1p), _name(name),
+    _predictor(predictor), _name(name),
     _summary_surv_rates_len(summary_surv_rates_len),
     _summary_surv_rates_max_len(0),
     _summary_surv_rates(NULL),
@@ -52,10 +52,13 @@
   start_adding_regions();
 }
 
+double SurvRateGroup::get_new_prediction(TruncatedSeq const* seq) const {
+  return _predictor->get_new_prediction(seq);
+}
+
 void SurvRateGroup::reset() {
   _all_regions_allocated = 0;
   _setup_seq_num         = 0;
-  _accum_surv_rate       = 0.0;
   _last_pred             = 0.0;
   // the following will set up the arrays with length 1
   _region_num            = 1;
@@ -76,15 +79,12 @@
   _region_num = 0;
 }
 
-void
-SurvRateGroup::start_adding_regions() {
+void SurvRateGroup::start_adding_regions() {
   _setup_seq_num   = _stats_arrays_length;
   _region_num      = 0;
-  _accum_surv_rate = 0.0;
 }
 
-void
-SurvRateGroup::stop_adding_regions() {
+void SurvRateGroup::stop_adding_regions() {
   if (_region_num > _stats_arrays_length) {
     double* old_surv_rate = _surv_rate;
     double* old_accum_surv_rate_pred = _accum_surv_rate_pred;
@@ -119,33 +119,12 @@
   }
 }
 
-double
-SurvRateGroup::accum_surv_rate(size_t adjustment) {
-  // we might relax this one in the future...
-  guarantee( adjustment == 0 || adjustment == 1, "pre-condition" );
-
-  double ret = _accum_surv_rate;
-  if (adjustment > 0) {
-    TruncatedSeq* seq = get_seq(_region_num+1);
-    double surv_rate = _g1p->get_new_prediction(seq);
-    ret += surv_rate;
-  }
-
-  return ret;
-}
-
-int
-SurvRateGroup::next_age_index() {
-  TruncatedSeq* seq = get_seq(_region_num);
-  double surv_rate = _g1p->get_new_prediction(seq);
-  _accum_surv_rate += surv_rate;
-
+int SurvRateGroup::next_age_index() {
   ++_region_num;
   return (int) ++_all_regions_allocated;
 }
 
-void
-SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
+void SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
   guarantee( 0 <= age_in_group && (size_t) age_in_group < _region_num,
              "pre-condition" );
   guarantee( _surv_rate[age_in_group] <= 0.00001,
@@ -161,9 +140,8 @@
   }
 }
 
-void
-SurvRateGroup::all_surviving_words_recorded(bool propagate) {
-  if (propagate && _region_num > 0) { // conservative
+void SurvRateGroup::all_surviving_words_recorded(bool update_predictors) {
+  if (update_predictors && _region_num > 0) { // conservative
     double surv_rate = _surv_rate_pred[_region_num-1]->last();
     for (size_t i = _region_num; i < _stats_arrays_length; ++i) {
       guarantee( _surv_rate[i] <= 0.00001,
@@ -175,24 +153,22 @@
   double accum = 0.0;
   double pred = 0.0;
   for (size_t i = 0; i < _stats_arrays_length; ++i) {
-    pred = _g1p->get_new_prediction(_surv_rate_pred[i]);
+    pred = get_new_prediction(_surv_rate_pred[i]);
     if (pred > 1.0) pred = 1.0;
     accum += pred;
     _accum_surv_rate_pred[i] = accum;
-    // gclog_or_tty->print_cr("age %3d, accum %10.2lf", i, accum);
   }
   _last_pred = pred;
 }
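To make the clamping and accumulation concrete, a worked example with made-up per-age predictions (values are illustrative only):

    // get_new_prediction() per age:  0.95, 0.80, 1.20
    // after the (pred > 1.0) clamp:  0.95, 0.80, 1.00
    // _accum_surv_rate_pred[i]:      0.95, 1.75, 2.75
    // _last_pred:                    1.00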
 
 #ifndef PRODUCT
-void
-SurvRateGroup::print() {
+void SurvRateGroup::print() {
   gclog_or_tty->print_cr("Surv Rate Group: %s (" SIZE_FORMAT " entries)",
                 _name, _region_num);
   for (size_t i = 0; i < _region_num; ++i) {
     gclog_or_tty->print_cr("    age " SIZE_FORMAT_W(4) "   surv rate %6.2lf %%   pred %6.2lf %%",
-                  i, _surv_rate[i] * 100.0,
-                  _g1p->get_new_prediction(_surv_rate_pred[i]) * 100.0);
+                           i, _surv_rate[i] * 100.0,
+                           _predictor->get_new_prediction(_surv_rate_pred[i]) * 100.0);
   }
 }
 
@@ -211,9 +187,9 @@
   size_t limit = MIN2((int) length, 10);
   while (index < limit) {
     gclog_or_tty->print_cr("           " SIZE_FORMAT_W(4)
-                  "                 %6.2lf%%             %6.2lf",
-                  index, _summary_surv_rates[index]->avg() * 100.0,
-                  (double) _summary_surv_rates[index]->num());
+                           "                 %6.2lf%%             %6.2lf",
+                           index, _summary_surv_rates[index]->avg() * 100.0,
+                           (double) _summary_surv_rates[index]->num());
     ++index;
   }
 
@@ -230,9 +206,9 @@
 
     if (index == length || num % 10 == 0) {
       gclog_or_tty->print_cr("   " SIZE_FORMAT_W(4) " .. " SIZE_FORMAT_W(4)
-                    "                 %6.2lf%%             %6.2lf",
-                    (index-1) / 10 * 10, index-1, sum / (double) num,
-                    (double) samples / (double) num);
+                             "                 %6.2lf%%             %6.2lf",
+                             (index-1) / 10 * 10, index-1, sum / (double) num,
+                             (double) samples / (double) num);
       sum = 0.0;
       num = 0;
       samples = 0;
--- a/hotspot/src/share/vm/gc/g1/survRateGroup.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/g1/survRateGroup.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -27,18 +27,20 @@
 
 #include "utilities/numberSeq.hpp"
 
-class G1CollectorPolicy;
+class G1Predictions;
 
 class SurvRateGroup : public CHeapObj<mtGC> {
 private:
-  G1CollectorPolicy* _g1p;
+  G1Predictions* _predictor;
+
+  double get_new_prediction(TruncatedSeq const* seq) const;
+
   const char* _name;
 
   size_t  _stats_arrays_length;
   double* _surv_rate;
   double* _accum_surv_rate_pred;
   double  _last_pred;
-  double  _accum_surv_rate;
   TruncatedSeq** _surv_rate_pred;
   NumberSeq**    _summary_surv_rates;
   size_t         _summary_surv_rates_len;
@@ -49,18 +51,18 @@
   size_t _setup_seq_num;
 
 public:
-  SurvRateGroup(G1CollectorPolicy* g1p,
+  SurvRateGroup(G1Predictions* predictor,
                 const char* name,
                 size_t summary_surv_rates_len);
   void reset();
   void start_adding_regions();
   void stop_adding_regions();
   void record_surviving_words(int age_in_group, size_t surv_words);
-  void all_surviving_words_recorded(bool propagate);
+  void all_surviving_words_recorded(bool update_predictors);
   const char* name() { return _name; }
 
   size_t region_num() { return _region_num; }
-  double accum_surv_rate_pred(int age) {
+  double accum_surv_rate_pred(int age) const {
     assert(age >= 0, "must be");
     if ((size_t)age < _stats_arrays_length)
       return _accum_surv_rate_pred[age];
@@ -70,9 +72,7 @@
     }
   }
 
-  double accum_surv_rate(size_t adjustment);
-
-  TruncatedSeq* get_seq(size_t age) {
+  TruncatedSeq* get_seq(size_t age) const {
     if (age >= _setup_seq_num) {
       guarantee( _setup_seq_num > 0, "invariant" );
       age = _setup_seq_num-1;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/youngList.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.hpp"
+#include "gc/g1/g1CollectorPolicy.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/g1/heapRegion.inline.hpp"
+#include "gc/g1/heapRegionRemSet.hpp"
+#include "gc/g1/youngList.hpp"
+#include "utilities/ostream.hpp"
+
+YoungList::YoungList(G1CollectedHeap* g1h) :
+    _g1h(g1h), _head(NULL), _length(0), _last_sampled_rs_lengths(0),
+    _survivor_head(NULL), _survivor_tail(NULL), _survivor_length(0) {
+  guarantee(check_list_empty(false), "just making sure...");
+}
+
+void YoungList::push_region(HeapRegion* hr) {
+  assert(!hr->is_young(), "should not already be young");
+  assert(hr->get_next_young_region() == NULL, "should not already be linked into a young list");
+
+  hr->set_next_young_region(_head);
+  _head = hr;
+
+  _g1h->g1_policy()->set_region_eden(hr, (int) _length);
+  ++_length;
+}
+
+void YoungList::add_survivor_region(HeapRegion* hr) {
+  assert(hr->is_survivor(), "should be flagged as survivor region");
+  assert(hr->get_next_young_region() == NULL, "should not already be linked into a young list");
+
+  hr->set_next_young_region(_survivor_head);
+  if (_survivor_head == NULL) {
+    _survivor_tail = hr;
+  }
+  _survivor_head = hr;
+  ++_survivor_length;
+}
+
+void YoungList::empty_list(HeapRegion* list) {
+  while (list != NULL) {
+    HeapRegion* next = list->get_next_young_region();
+    list->set_next_young_region(NULL);
+    list->uninstall_surv_rate_group();
+    // This is called before a Full GC. All the non-empty /
+    // non-humongous regions will end up as old at the end of the
+    // Full GC anyway, so tag them as old here.
+    list->set_old();
+    list = next;
+  }
+}
+
+void YoungList::empty_list() {
+  assert(check_list_well_formed(), "young list should be well formed");
+
+  empty_list(_head);
+  _head = NULL;
+  _length = 0;
+
+  empty_list(_survivor_head);
+  _survivor_head = NULL;
+  _survivor_tail = NULL;
+  _survivor_length = 0;
+
+  _last_sampled_rs_lengths = 0;
+
+  assert(check_list_empty(false), "just making sure...");
+}
+
+bool YoungList::check_list_well_formed() {
+  bool ret = true;
+
+  uint length = 0;
+  HeapRegion* curr = _head;
+  HeapRegion* last = NULL;
+  while (curr != NULL) {
+    if (!curr->is_young()) {
+      gclog_or_tty->print_cr("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
+                             "incorrectly tagged (y: %d, surv: %d)",
+                             p2i(curr->bottom()), p2i(curr->end()),
+                             curr->is_young(), curr->is_survivor());
+      ret = false;
+    }
+    ++length;
+    last = curr;
+    curr = curr->get_next_young_region();
+  }
+  ret = ret && (length == _length);
+
+  if (!ret) {
+    gclog_or_tty->print_cr("### YOUNG LIST seems not well formed!");
+    gclog_or_tty->print_cr("###   list has %u entries, _length is %u",
+                           length, _length);
+  }
+
+  return ret;
+}
+
+bool YoungList::check_list_empty(bool check_sample) {
+  bool ret = true;
+
+  if (_length != 0) {
+    gclog_or_tty->print_cr("### YOUNG LIST should have 0 length, not %u",
+                           _length);
+    ret = false;
+  }
+  if (check_sample && _last_sampled_rs_lengths != 0) {
+    gclog_or_tty->print_cr("### YOUNG LIST has non-zero last sampled RS lengths");
+    ret = false;
+  }
+  if (_head != NULL) {
+    gclog_or_tty->print_cr("### YOUNG LIST does not have a NULL head");
+    ret = false;
+  }
+  if (!ret) {
+    gclog_or_tty->print_cr("### YOUNG LIST does not seem empty");
+  }
+
+  return ret;
+}
+
+void YoungList::rs_length_sampling_init() {
+  _sampled_rs_lengths = 0;
+  _curr               = _head;
+}
+
+bool YoungList::rs_length_sampling_more() {
+  return _curr != NULL;
+}
+
+void YoungList::rs_length_sampling_next() {
+  assert( _curr != NULL, "invariant" );
+  size_t rs_length = _curr->rem_set()->occupied();
+
+  _sampled_rs_lengths += rs_length;
+
+  // The current region may not yet have been added to the
+  // incremental collection set (it gets added when it is
+  // retired as the current allocation region).
+  if (_curr->in_collection_set()) {
+    // Update the collection set policy information for this region
+    _g1h->g1_policy()->update_incremental_cset_info(_curr, rs_length);
+  }
+
+  _curr = _curr->get_next_young_region();
+  if (_curr == NULL) {
+    _last_sampled_rs_lengths = _sampled_rs_lengths;
+  }
+}
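The three rs_length_sampling_* methods form a simple external-iterator protocol. A sketch of a driver, with a hypothetical function name (in the JDK the loop is driven elsewhere, from the concurrent sampling side):

    // Hypothetical driver illustrating the protocol only.
    size_t sample_rs_lengths(YoungList* young_list) {
      young_list->rs_length_sampling_init();    // _curr = _head, sum = 0
      while (young_list->rs_length_sampling_more()) {
        young_list->rs_length_sampling_next();  // add _curr's remset size, advance
      }
      // The sum was published to _last_sampled_rs_lengths when the
      // walk reached the end of the list.
      return young_list->sampled_rs_lengths();
    }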
+
+void YoungList::reset_auxilary_lists() {
+  guarantee( is_empty(), "young list should be empty" );
+  assert(check_list_well_formed(), "young list should be well formed");
+
+  // Add survivor regions to SurvRateGroup.
+  _g1h->g1_policy()->note_start_adding_survivor_regions();
+  _g1h->g1_policy()->finished_recalculating_age_indexes(true /* is_survivors */);
+
+  int young_index_in_cset = 0;
+  for (HeapRegion* curr = _survivor_head;
+       curr != NULL;
+       curr = curr->get_next_young_region()) {
+    _g1h->g1_policy()->set_region_survivor(curr, young_index_in_cset);
+
+    // The region is a non-empty survivor so let's add it to
+    // the incremental collection set for the next evacuation
+    // pause.
+    _g1h->g1_policy()->add_region_to_incremental_cset_rhs(curr);
+    young_index_in_cset += 1;
+  }
+  assert((uint) young_index_in_cset == _survivor_length, "post-condition");
+  _g1h->g1_policy()->note_stop_adding_survivor_regions();
+
+  _head   = _survivor_head;
+  _length = _survivor_length;
+  if (_survivor_head != NULL) {
+    assert(_survivor_tail != NULL, "survivor tail must be set when the head is");
+    assert(_survivor_length > 0, "invariant");
+    _survivor_tail->set_next_young_region(NULL);
+  }
+
+  // Don't clear the survivor list handles until the start of
+  // the next evacuation pause - we need them in order to re-tag
+  // the survivor regions from this evacuation pause as 'young'
+  // at the start of the next.
+
+  _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
+
+  assert(check_list_well_formed(), "young list should be well formed");
+}
+
+void YoungList::print() {
+  HeapRegion* lists[] = {_head,   _survivor_head};
+  const char* names[] = {"YOUNG", "SURVIVOR"};
+
+  for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
+    gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
+    HeapRegion* curr = lists[list];
+    if (curr == NULL) {
+      gclog_or_tty->print_cr("  empty");
+    }
+    while (curr != NULL) {
+      gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
+                             HR_FORMAT_PARAMS(curr),
+                             p2i(curr->prev_top_at_mark_start()),
+                             p2i(curr->next_top_at_mark_start()),
+                             curr->age_in_surv_rate_group_cond());
+      curr = curr->get_next_young_region();
+    }
+  }
+
+  gclog_or_tty->cr();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc/g1/youngList.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_YOUNGLIST_HPP
+#define SHARE_VM_GC_G1_YOUNGLIST_HPP
+
+#include "memory/allocation.hpp"
+#include "runtime/globals.hpp"
+
+class YoungList : public CHeapObj<mtGC> {
+private:
+  G1CollectedHeap* _g1h;
+
+  HeapRegion* _head;
+
+  HeapRegion* _survivor_head;
+  HeapRegion* _survivor_tail;
+
+  HeapRegion* _curr;
+
+  uint        _length;
+  uint        _survivor_length;
+
+  size_t      _last_sampled_rs_lengths;
+  size_t      _sampled_rs_lengths;
+
+  void         empty_list(HeapRegion* list);
+
+public:
+  YoungList(G1CollectedHeap* g1h);
+
+  void         push_region(HeapRegion* hr);
+  void         add_survivor_region(HeapRegion* hr);
+
+  void         empty_list();
+  bool         is_empty() { return _length == 0; }
+  uint         length() { return _length; }
+  uint         eden_length() { return length() - survivor_length(); }
+  uint         survivor_length() { return _survivor_length; }
+
+  // Currently we do not keep track of the used byte sum for the
+  // young list and the survivors, and it'd be quite a lot of work to
+  // do so. When we eventually replace the young list with
+  // instances of HeapRegionLinkedList we'll get that for free. So,
+  // we'll report the more accurate information then. (A worked
+  // example of the approximation follows this class declaration.)
+  size_t       eden_used_bytes() {
+    assert(length() >= survivor_length(), "invariant");
+    return (size_t) eden_length() * HeapRegion::GrainBytes;
+  }
+  size_t       survivor_used_bytes() {
+    return (size_t) survivor_length() * HeapRegion::GrainBytes;
+  }
+
+  void rs_length_sampling_init();
+  bool rs_length_sampling_more();
+  void rs_length_sampling_next();
+
+  void reset_sampled_info() {
+    _last_sampled_rs_lengths = 0;
+  }
+  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
+
+  // for development purposes
+  void reset_auxilary_lists();
+  void clear() { _head = NULL; _length = 0; }
+
+  void clear_survivors() {
+    _survivor_head    = NULL;
+    _survivor_tail    = NULL;
+    _survivor_length  = 0;
+  }
+
+  HeapRegion* first_region() { return _head; }
+  HeapRegion* first_survivor_region() { return _survivor_head; }
+  HeapRegion* last_survivor_region() { return _survivor_tail; }
+
+  // debugging
+  bool          check_list_well_formed();
+  bool          check_list_empty(bool check_sample = true);
+  void          print();
+};
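A worked example of the region-granularity approximation described above eden_used_bytes() (the GrainBytes value is illustrative):

    // With HeapRegion::GrainBytes == 1M, length() == 10, survivor_length() == 3:
    //   eden_length()         == 10 - 3 == 7
    //   eden_used_bytes()     == 7 * 1M   (over-approximates the partially
    //                                      filled current allocation region)
    //   survivor_used_bytes() == 3 * 1M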
+
+#endif // SHARE_VM_GC_G1_YOUNGLIST_HPP
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -529,10 +529,7 @@
       set_decide_at_full_gc(decide_at_full_gc_true);
       adjust_promo_for_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
     }
-  } else if (_avg_minor_pause->padded_average() > gc_minor_pause_goal_sec()) {
-    // Adjust only for the minor pause time goal
-    adjust_promo_for_minor_pause_time(is_full_gc, &desired_promo_size, &desired_eden_size);
-  } else if(adjusted_mutator_cost() < _throughput_goal) {
+  } else if (adjusted_mutator_cost() < _throughput_goal) {
     // This branch used to require that (mutator_cost() > 0.0) in 1.4.2.
     // This sometimes resulted in skipping to the minimize footprint
     // code.  Change this to try and reduce GC time if mutator time is
@@ -670,36 +667,6 @@
   }
 }
 
-void PSAdaptiveSizePolicy::adjust_promo_for_minor_pause_time(bool is_full_gc,
-    size_t* desired_promo_size_ptr, size_t* desired_eden_size_ptr) {
-
-  if (PSAdjustTenuredGenForMinorPause) {
-    if (is_full_gc) {
-      set_decide_at_full_gc(decide_at_full_gc_true);
-    }
-    // If the desired eden size is as small as it will get,
-    // try to adjust the old gen size.
-    if (*desired_eden_size_ptr <= _space_alignment) {
-      // Vary the old gen size to reduce the young gen pause.  This
-      // may not be a good idea.  This is just a test.
-      if (minor_pause_old_estimator()->decrement_will_decrease()) {
-        set_change_old_gen_for_min_pauses(decrease_old_gen_for_min_pauses_true);
-        *desired_promo_size_ptr =
-          _promo_size - promo_decrement_aligned_down(*desired_promo_size_ptr);
-      } else {
-        set_change_old_gen_for_min_pauses(increase_old_gen_for_min_pauses_true);
-        size_t promo_heap_delta =
-          promo_increment_with_supplement_aligned_up(*desired_promo_size_ptr);
-        if ((*desired_promo_size_ptr + promo_heap_delta) >
-            *desired_promo_size_ptr) {
-          *desired_promo_size_ptr =
-            _promo_size + promo_heap_delta;
-        }
-      }
-    }
-  }
-}
-
 void PSAdaptiveSizePolicy::adjust_eden_for_minor_pause_time(bool is_full_gc,
     size_t* desired_eden_size_ptr) {
 
@@ -733,10 +700,7 @@
   // a change less than the required alignment is probably not worth
   // attempting.
 
-  if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
-    adjust_promo_for_minor_pause_time(is_full_gc, desired_promo_size_ptr, desired_eden_size_ptr);
-    // major pause adjustments
-  } else if (is_full_gc) {
+  if (_avg_minor_pause->padded_average() <= _avg_major_pause->padded_average() && is_full_gc) {
     // Adjust for the major pause time only at full gc's because the
     // effects of a change can only be seen at full gc's.
 
@@ -774,44 +738,8 @@
   // a change less than the required alignment is probably not worth
   // attempting.
   if (_avg_minor_pause->padded_average() > _avg_major_pause->padded_average()) {
-    adjust_eden_for_minor_pause_time(is_full_gc,
-                                desired_eden_size_ptr);
-    // major pause adjustments
-  } else if (is_full_gc) {
-    // Adjust for the major pause time only at full gc's because the
-    // affects of a change can only be seen at full gc's.
-    if (PSAdjustYoungGenForMajorPause) {
-      // If the promo size is at the minimum (i.e., the old gen
-      // size will not actually decrease), consider changing the
-      // young gen size.
-      if (*desired_promo_size_ptr < _space_alignment) {
-        // If increasing the young generation will decrease the old gen
-        // pause, do it.
-        // During startup there is noise in the statistics for deciding
-        // on whether to increase or decrease the young gen size.  For
-        // some number of iterations, just try to increase the young
-        // gen size if the major pause is too long to try and establish
-        // good statistics for later decisions.
-        if (major_pause_young_estimator()->increment_will_decrease() ||
-          (_young_gen_change_for_major_pause_count
-            <= AdaptiveSizePolicyInitializingSteps)) {
-          set_change_young_gen_for_maj_pauses(
-          increase_young_gen_for_maj_pauses_true);
-          eden_heap_delta = eden_increment_aligned_up(*desired_eden_size_ptr);
-          *desired_eden_size_ptr = _eden_size + eden_heap_delta;
-          _young_gen_change_for_major_pause_count++;
-        } else {
-          // Record that decreasing the young gen size would decrease
-          // the major pause
-          set_change_young_gen_for_maj_pauses(
-            decrease_young_gen_for_maj_pauses_true);
-          eden_heap_delta = eden_decrement_aligned_down(*desired_eden_size_ptr);
-          *desired_eden_size_ptr = _eden_size - eden_heap_delta;
-        }
-      }
-    }
+    adjust_eden_for_minor_pause_time(is_full_gc, desired_eden_size_ptr);
   }
-
   if (PrintAdaptiveSizePolicy && Verbose) {
     gclog_or_tty->print_cr(
       "PSAdaptiveSizePolicy::adjust_eden_for_pause_time "
--- a/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psAdaptiveSizePolicy.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -134,10 +134,6 @@
   AdaptivePaddedAverage* avg_major_pause() const { return _avg_major_pause; }
   double gc_minor_pause_goal_sec() const { return _gc_minor_pause_goal_sec; }
 
-  // Change the young generation size to achieve a minor GC pause time goal
-  void adjust_promo_for_minor_pause_time(bool is_full_gc,
-                                   size_t* desired_promo_size_ptr,
-                                   size_t* desired_eden_size_ptr);
   void adjust_eden_for_minor_pause_time(bool is_full_gc,
                                    size_t* desired_eden_size_ptr);
   // Change the generation sizes to achieve a GC pause time goal
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -1351,13 +1351,6 @@
 PSParallelCompact::compute_dense_prefix(const SpaceId id,
                                         bool maximum_compaction)
 {
-  if (ParallelOldGCSplitALot) {
-    if (_space_info[id].dense_prefix() != _space_info[id].space()->bottom()) {
-      // The value was chosen to provoke splitting a young gen space; use it.
-      return _space_info[id].dense_prefix();
-    }
-  }
-
   const size_t region_size = ParallelCompactData::RegionSize;
   const ParallelCompactData& sd = summary_data();
 
@@ -1430,220 +1423,9 @@
     }
   }
 
-#if     0
-  // Something to consider:  if the region with the best ratio is 'close to' the
-  // first region w/free space, choose the first region with free space
-  // ("first-free").  The first-free region is usually near the start of the
-  // heap, which means we are copying most of the heap already, so copy a bit
-  // more to get complete compaction.
-  if (pointer_delta(best_cp, full_cp, sizeof(RegionData)) < 4) {
-    _maximum_compaction_gc_num = total_invocations();
-    best_cp = full_cp;
-  }
-#endif  // #if 0
-
   return sd.region_to_addr(best_cp);
 }
 
-#ifndef PRODUCT
-void
-PSParallelCompact::fill_with_live_objects(SpaceId id, HeapWord* const start,
-                                          size_t words)
-{
-  if (TraceParallelOldGCSummaryPhase) {
-    tty->print_cr("fill_with_live_objects [" PTR_FORMAT " " PTR_FORMAT ") "
-                  SIZE_FORMAT, p2i(start), p2i(start + words), words);
-  }
-
-  ObjectStartArray* const start_array = _space_info[id].start_array();
-  CollectedHeap::fill_with_objects(start, words);
-  for (HeapWord* p = start; p < start + words; p += oop(p)->size()) {
-    _mark_bitmap.mark_obj(p, words);
-    _summary_data.add_obj(p, words);
-    start_array->allocate_block(p);
-  }
-}
-
-void
-PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
-{
-  ParallelCompactData& sd = summary_data();
-  MutableSpace* space = _space_info[id].space();
-
-  // Find the source and destination start addresses.
-  HeapWord* const src_addr = sd.region_align_down(start);
-  HeapWord* dst_addr;
-  if (src_addr < start) {
-    dst_addr = sd.addr_to_region_ptr(src_addr)->destination();
-  } else if (src_addr > space->bottom()) {
-    // The start (the original top() value) is aligned to a region boundary so
-    // the associated region does not have a destination.  Compute the
-    // destination from the previous region.
-    RegionData* const cp = sd.addr_to_region_ptr(src_addr) - 1;
-    dst_addr = cp->destination() + cp->data_size();
-  } else {
-    // Filling the entire space.
-    dst_addr = space->bottom();
-  }
-  assert(dst_addr != NULL, "sanity");
-
-  // Update the summary data.
-  bool result = _summary_data.summarize(_space_info[id].split_info(),
-                                        src_addr, space->top(), NULL,
-                                        dst_addr, space->end(),
-                                        _space_info[id].new_top_addr());
-  assert(result, "should not fail:  bad filler object size");
-}
-
-void
-PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
-{
-  if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
-    return;
-  }
-
-  MutableSpace* const space = _space_info[id].space();
-  if (space->is_empty()) {
-    HeapWord* b = space->bottom();
-    HeapWord* t = b + space->capacity_in_words() / 2;
-    space->set_top(t);
-    if (ZapUnusedHeapArea) {
-      space->set_top_for_allocations();
-    }
-
-    size_t min_size = CollectedHeap::min_fill_size();
-    size_t obj_len = min_size;
-    while (b + obj_len <= t) {
-      CollectedHeap::fill_with_object(b, obj_len);
-      mark_bitmap()->mark_obj(b, obj_len);
-      summary_data().add_obj(b, obj_len);
-      b += obj_len;
-      obj_len = (obj_len & (min_size*3)) + min_size; // 8 16 24 32 8 16 24 32 ...
-    }
-    if (b < t) {
-      // The loop didn't completely fill to t (top); adjust top downward.
-      space->set_top(b);
-      if (ZapUnusedHeapArea) {
-        space->set_top_for_allocations();
-      }
-    }
-
-    HeapWord** nta = _space_info[id].new_top_addr();
-    bool result = summary_data().summarize(_space_info[id].split_info(),
-                                           space->bottom(), space->top(), NULL,
-                                           space->bottom(), space->end(), nta);
-    assert(result, "space must fit into itself");
-  }
-}
-
-void
-PSParallelCompact::provoke_split(bool & max_compaction)
-{
-  if (total_invocations() % ParallelOldGCSplitInterval != 0) {
-    return;
-  }
-
-  const size_t region_size = ParallelCompactData::RegionSize;
-  ParallelCompactData& sd = summary_data();
-
-  MutableSpace* const eden_space = _space_info[eden_space_id].space();
-  MutableSpace* const from_space = _space_info[from_space_id].space();
-  const size_t eden_live = pointer_delta(eden_space->top(),
-                                         _space_info[eden_space_id].new_top());
-  const size_t from_live = pointer_delta(from_space->top(),
-                                         _space_info[from_space_id].new_top());
-
-  const size_t min_fill_size = CollectedHeap::min_fill_size();
-  const size_t eden_free = pointer_delta(eden_space->end(), eden_space->top());
-  const size_t eden_fillable = eden_free >= min_fill_size ? eden_free : 0;
-  const size_t from_free = pointer_delta(from_space->end(), from_space->top());
-  const size_t from_fillable = from_free >= min_fill_size ? from_free : 0;
-
-  // Choose the space to split; need at least 2 regions live (or fillable).
-  SpaceId id;
-  MutableSpace* space;
-  size_t live_words;
-  size_t fill_words;
-  if (eden_live + eden_fillable >= region_size * 2) {
-    id = eden_space_id;
-    space = eden_space;
-    live_words = eden_live;
-    fill_words = eden_fillable;
-  } else if (from_live + from_fillable >= region_size * 2) {
-    id = from_space_id;
-    space = from_space;
-    live_words = from_live;
-    fill_words = from_fillable;
-  } else {
-    return; // Give up.
-  }
-  assert(fill_words == 0 || fill_words >= min_fill_size, "sanity");
-
-  if (live_words < region_size * 2) {
-    // Fill from top() to end() w/live objects of mixed sizes.
-    HeapWord* const fill_start = space->top();
-    live_words += fill_words;
-
-    space->set_top(fill_start + fill_words);
-    if (ZapUnusedHeapArea) {
-      space->set_top_for_allocations();
-    }
-
-    HeapWord* cur_addr = fill_start;
-    while (fill_words > 0) {
-      const size_t r = (size_t)os::random() % (region_size / 2) + min_fill_size;
-      size_t cur_size = MIN2(align_object_size_(r), fill_words);
-      if (fill_words - cur_size < min_fill_size) {
-        cur_size = fill_words; // Avoid leaving a fragment too small to fill.
-      }
-
-      CollectedHeap::fill_with_object(cur_addr, cur_size);
-      mark_bitmap()->mark_obj(cur_addr, cur_size);
-      sd.add_obj(cur_addr, cur_size);
-
-      cur_addr += cur_size;
-      fill_words -= cur_size;
-    }
-
-    summarize_new_objects(id, fill_start);
-  }
-
-  max_compaction = false;
-
-  // Manipulate the old gen so that it has room for about half of the live data
-  // in the target young gen space (live_words / 2).
-  id = old_space_id;
-  space = _space_info[id].space();
-  const size_t free_at_end = space->free_in_words();
-  const size_t free_target = align_object_size(live_words / 2);
-  const size_t dead = pointer_delta(space->top(), _space_info[id].new_top());
-
-  if (free_at_end >= free_target + min_fill_size) {
-    // Fill space above top() and set the dense prefix so everything survives.
-    HeapWord* const fill_start = space->top();
-    const size_t fill_size = free_at_end - free_target;
-    space->set_top(space->top() + fill_size);
-    if (ZapUnusedHeapArea) {
-      space->set_top_for_allocations();
-    }
-    fill_with_live_objects(id, fill_start, fill_size);
-    summarize_new_objects(id, fill_start);
-    _space_info[id].set_dense_prefix(sd.region_align_down(space->top()));
-  } else if (dead + free_at_end > free_target) {
-    // Find a dense prefix that makes the right amount of space available.
-    HeapWord* cur = sd.region_align_down(space->top());
-    HeapWord* cur_destination = sd.addr_to_region_ptr(cur)->destination();
-    size_t dead_to_right = pointer_delta(space->end(), cur_destination);
-    while (dead_to_right < free_target) {
-      cur -= region_size;
-      cur_destination = sd.addr_to_region_ptr(cur)->destination();
-      dead_to_right = pointer_delta(space->end(), cur_destination);
-    }
-    _space_info[id].set_dense_prefix(cur);
-  }
-}
-#endif // #ifndef PRODUCT
-
 void PSParallelCompact::summarize_spaces_quick()
 {
   for (unsigned int i = 0; i < last_space_id; ++i) {
@@ -1655,12 +1437,6 @@
     assert(result, "space must fit into itself");
     _space_info[i].set_dense_prefix(space->bottom());
   }
-
-#ifndef PRODUCT
-  if (ParallelOldGCSplitALot) {
-    provoke_split_fill_survivor(to_space_id);
-  }
-#endif // #ifndef PRODUCT
 }
 
 void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
@@ -1745,8 +1521,7 @@
 PSParallelCompact::summarize_space(SpaceId id, bool maximum_compaction)
 {
   assert(id < last_space_id, "id out of range");
-  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom() ||
-         ParallelOldGCSplitALot && id == old_space_id,
+  assert(_space_info[id].dense_prefix() == _space_info[id].space()->bottom(),
          "should have been reset in summarize_spaces_quick()");
 
   const MutableSpace* space = _space_info[id].space();
@@ -1866,11 +1641,6 @@
     // XXX - should also try to expand
     maximum_compaction = true;
   }
-#ifndef PRODUCT
-  if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
-    provoke_split(maximum_compaction);
-  }
-#endif // #ifndef PRODUCT
 
   // Old generations.
   summarize_space(old_space_id, maximum_compaction);
--- a/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psParallelCompact.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -1059,24 +1059,6 @@
   // Clear the summary data source_region field for the specified addresses.
   static void clear_source_region(HeapWord* beg_addr, HeapWord* end_addr);
 
-#ifndef PRODUCT
-  // Routines to provoke splitting a young gen space (ParallelOldGCSplitALot).
-
-  // Fill the region [start, start + words) with live object(s).  Only usable
-  // for the old and permanent generations.
-  static void fill_with_live_objects(SpaceId id, HeapWord* const start,
-                                     size_t words);
-  // Include the new objects in the summary data.
-  static void summarize_new_objects(SpaceId id, HeapWord* start);
-
-  // Add live objects to a survivor space since it's rare that both survivors
-  // are non-empty.
-  static void provoke_split_fill_survivor(SpaceId id);
-
-  // Add live objects and/or choose the dense prefix to provoke splitting.
-  static void provoke_split(bool & maximum_compaction);
-#endif
-
   static void summarize_spaces_quick();
   static void summarize_space(SpaceId id, bool maximum_compaction);
   static void summary_phase(ParCompactionManager* cm, bool maximum_compaction);
--- a/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/parallel/psScavenge.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -297,11 +297,6 @@
     young_gen->eden_space()->accumulate_statistics();
   }
 
-  if (ZapUnusedHeapArea) {
-    // Save information needed to minimize mangling
-    heap->record_gen_tops_before_GC();
-  }
-
   heap->print_heap_before_gc();
   heap->trace_heap_before_gc(&_gc_tracer);
 
@@ -344,13 +339,10 @@
       CardTableExtension::verify_all_young_refs_imprecise();
     }
 
-    if (!ScavengeWithObjectsInToSpace) {
-      assert(young_gen->to_space()->is_empty(),
-             "Attempt to scavenge with live objects in to_space");
-      young_gen->to_space()->clear(SpaceDecorator::Mangle);
-    } else if (ZapUnusedHeapArea) {
-      young_gen->to_space()->mangle_unused_area();
-    }
+    assert(young_gen->to_space()->is_empty(),
+           "Attempt to scavenge with live objects in to_space");
+    young_gen->to_space()->clear(SpaceDecorator::Mangle);
+
     save_to_space_top_before_gc();
 
 #if defined(COMPILER2) || INCLUDE_JVMCI
@@ -681,12 +673,6 @@
   heap->print_heap_after_gc();
   heap->trace_heap_after_gc(&_gc_tracer);
 
-  if (ZapUnusedHeapArea) {
-    young_gen->eden_space()->check_mangled_unused_area_complete();
-    young_gen->from_space()->check_mangled_unused_area_complete();
-    young_gen->to_space()->check_mangled_unused_area_complete();
-  }
-
   scavenge_exit.update();
 
   if (PrintGCTaskTimeStamps) {
@@ -768,15 +754,13 @@
   PSYoungGen* young_gen = heap->young_gen();
   PSOldGen* old_gen = heap->old_gen();
 
-  if (!ScavengeWithObjectsInToSpace) {
-    // Do not attempt to promote unless to_space is empty
-    if (!young_gen->to_space()->is_empty()) {
-      _consecutive_skipped_scavenges++;
-      if (UsePerfData) {
-        counters->update_scavenge_skipped(to_space_not_empty);
-      }
-      return false;
+  // Do not attempt to promote unless to_space is empty
+  if (!young_gen->to_space()->is_empty()) {
+    _consecutive_skipped_scavenges++;
+    if (UsePerfData) {
+      counters->update_scavenge_skipped(to_space_not_empty);
     }
+    return false;
   }
 
   // Test to see if the scavenge will likely fail.
--- a/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/defNewGeneration.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/serial/defNewGeneration.inline.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/gcHeapSummary.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
@@ -33,7 +34,6 @@
 #include "gc/shared/gcTraceTime.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/referencePolicy.hpp"
 #include "gc/shared/space.inline.hpp"
@@ -69,8 +69,7 @@
 
 DefNewGeneration::KeepAliveClosure::
 KeepAliveClosure(ScanWeakRefClosure* cl) : _cl(cl) {
-  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
-  _rs = (CardTableRS*)rs;
+  _rs = GenCollectedHeap::heap()->rem_set();
 }
 
 void DefNewGeneration::KeepAliveClosure::do_oop(oop* p)       { DefNewGeneration::KeepAliveClosure::do_oop_work(p); }
--- a/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/genMarkSweep.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -117,7 +117,7 @@
   // can clear the card table.  Otherwise, we must invalidate
   // it (consider all cards dirty).  In the future, we might consider doing
   // compaction within generations only, and doing card-table sliding.
-  GenRemSet* rs = gch->rem_set();
+  CardTableRS* rs = gch->rem_set();
   Generation* old_gen = gch->old_gen();
 
   // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
--- a/hotspot/src/share/vm/gc/serial/markSweep.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/markSweep.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -196,7 +196,9 @@
   virtual void do_cld(ClassLoaderData* cld);
   void do_cld_nv(ClassLoaderData* cld);
 
-  void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
+  void set_ref_processor(ReferenceProcessor* rp) {
+    set_ref_processor_internal(rp);
+  }
 };
 
 class PreservedMark VALUE_OBJ_CLASS_SPEC {
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -42,7 +42,7 @@
 
 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                      size_t initial_byte_size,
-                                     GenRemSet* remset) :
+                                     CardTableRS* remset) :
   CardGeneration(rs, initial_byte_size, remset)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
--- a/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/serial/tenuredGeneration.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -58,7 +58,7 @@
  public:
   TenuredGeneration(ReservedSpace rs,
                     size_t initial_byte_size,
-                    GenRemSet* remset);
+                    CardTableRS* remset);
 
   Generation::Name kind() { return Generation::MarkSweepCompact; }
 
--- a/hotspot/src/share/vm/gc/shared/cardGeneration.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/cardGeneration.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -26,9 +26,9 @@
 
 #include "gc/shared/blockOffsetTable.inline.hpp"
 #include "gc/shared/cardGeneration.inline.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/gcLocker.hpp"
 #include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/space.inline.hpp"
 #include "memory/iterator.hpp"
@@ -37,7 +37,7 @@
 
 CardGeneration::CardGeneration(ReservedSpace rs,
                                size_t initial_byte_size,
-                               GenRemSet* remset) :
+                               CardTableRS* remset) :
   Generation(rs, initial_byte_size), _rs(remset),
   _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
   _used_at_prologue()
--- a/hotspot/src/share/vm/gc/shared/cardGeneration.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/cardGeneration.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -37,7 +37,7 @@
   friend class VMStructs;
  protected:
   // This is shared with other generations.
-  GenRemSet* _rs;
+  CardTableRS* _rs;
   // This is local to this generation.
   BlockOffsetSharedArray* _bts;
 
@@ -52,7 +52,7 @@
   size_t _capacity_at_prologue;
   size_t _used_at_prologue;
 
-  CardGeneration(ReservedSpace rs, size_t initial_byte_size, GenRemSet* remset);
+  CardGeneration(ReservedSpace rs, size_t initial_byte_size, CardTableRS* remset);
 
   virtual void assert_correct_size_change_locking() = 0;
 
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -34,8 +34,48 @@
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
 
+class HasAccumulatedModifiedOopsClosure : public KlassClosure {
+  bool _found;
+ public:
+  HasAccumulatedModifiedOopsClosure() : _found(false) {}
+  void do_klass(Klass* klass) {
+    if (_found) {
+      return;
+    }
+
+    if (klass->has_accumulated_modified_oops()) {
+      _found = true;
+    }
+  }
+  bool found() {
+    return _found;
+  }
+};
+
+bool KlassRemSet::mod_union_is_clear() {
+  HasAccumulatedModifiedOopsClosure closure;
+  ClassLoaderDataGraph::classes_do(&closure);
+
+  return !closure.found();
+}
+
+class ClearKlassModUnionClosure : public KlassClosure {
+ public:
+  void do_klass(Klass* klass) {
+    if (klass->has_accumulated_modified_oops()) {
+      klass->clear_accumulated_modified_oops();
+    }
+  }
+};
+
+void KlassRemSet::clear_mod_union() {
+  ClearKlassModUnionClosure closure;
+  ClassLoaderDataGraph::classes_do(&closure);
+}
+
 CardTableRS::CardTableRS(MemRegion whole_heap) :
-  GenRemSet(),
+  _bs(NULL),
   _cur_youngergen_card_val(youngergenP1_card)
 {
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
--- a/hotspot/src/share/vm/gc/shared/cardTableRS.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/cardTableRS.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -26,16 +26,26 @@
 #define SHARE_VM_GC_SHARED_CARDTABLERS_HPP
 
 #include "gc/shared/cardTableModRefBSForCTRS.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "memory/memRegion.hpp"
 
 class Space;
 class OopsInGenClosure;
 
-// This kind of "GenRemSet" uses a card table both as shared data structure
+// Helper to remember modified oops in all klasses.
+class KlassRemSet {
+  bool _accumulate_modified_oops;
+ public:
+  KlassRemSet() : _accumulate_modified_oops(false) {}
+  void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
+  bool accumulate_modified_oops() { return _accumulate_modified_oops; }
+  bool mod_union_is_clear();
+  void clear_mod_union();
+};
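A rough sketch of how a collector might drive KlassRemSet across a concurrent cycle; the sequence is inferred from the methods above and from the closures added in cardTableRS.cpp, and the function itself is hypothetical:

    // Illustrative only; the rescanning step is collector-specific.
    void concurrent_cycle_example(CardTableRS* ctrs) {
      KlassRemSet* krs = ctrs->klass_rem_set();

      // Concurrent phase begins: have the barrier remember klasses whose
      // oop fields are modified while marking runs.
      krs->set_accumulate_modified_oops(true);

      // ... concurrent marking ...

      // At remark, rescan only if some klass accumulated modified oops.
      if (!krs->mod_union_is_clear()) {
        // rescan the klasses with accumulated modified oops here
      }
      krs->clear_mod_union();
      krs->set_accumulate_modified_oops(false);
    }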
+
+// This RemSet uses a card table both as a shared data structure
 // for a mod ref barrier set and for the rem set information.
 
-class CardTableRS: public GenRemSet {
+class CardTableRS: public CHeapObj<mtGC> {
   friend class VMStructs;
   // Below are private classes used in impl.
   friend class VerifyCTSpaceClosure;
@@ -54,9 +64,10 @@
     return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
   }
 
-  CardTableModRefBSForCTRS* _ct_bs;
+  KlassRemSet _klass_rem_set;
+  BarrierSet* _bs;
 
-  virtual void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
+  CardTableModRefBSForCTRS* _ct_bs;
 
   void verify_space(Space* s, HeapWord* gen_start);
 
@@ -104,11 +115,18 @@
   CardTableRS(MemRegion whole_heap);
   ~CardTableRS();
 
-  // *** GenRemSet functions.
-  CardTableRS* as_CardTableRS() { return this; }
+  // Return the barrier set associated with "this."
+  BarrierSet* bs() { return _bs; }
+
+  // Set the barrier set.
+  void set_bs(BarrierSet* bs) { _bs = bs; }
+
+  KlassRemSet* klass_rem_set() { return &_klass_rem_set; }
 
   CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }
 
+  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);
+
   void prepare_for_younger_refs_iterate(bool parallel);
 
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -152,7 +152,7 @@
   return result;
 }
 
-GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
+CardTableRS* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
   return new CardTableRS(whole_heap);
 }
 
@@ -173,7 +173,7 @@
   // byte entry and the os page size is 4096, the maximum heap size should
   // be 512*4096 = 2MB aligned.
 
-  size_t alignment = GenRemSet::max_alignment_constraint();
+  size_t alignment = CardTableRS::ct_max_alignment_constraint();
 
   if (UseLargePages) {
       // In presence of large pages we have to make sure that our
--- a/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/collectorPolicy.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -26,7 +26,7 @@
 #define SHARE_VM_GC_SHARED_COLLECTORPOLICY_HPP
 
 #include "gc/shared/barrierSet.hpp"
-#include "gc/shared/genRemSet.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/macros.hpp"
@@ -143,7 +143,7 @@
 #endif // INCLUDE_ALL_GCS
 
 
-  virtual GenRemSet* create_rem_set(MemRegion reserved);
+  virtual CardTableRS* create_rem_set(MemRegion reserved);
 
   // This method controls how a collector satisfies a request
   // for a block of memory.  "gc_time_limit_was_exceeded" will
--- a/hotspot/src/share/vm/gc/shared/gcId.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/gcId.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -60,12 +60,12 @@
 }
 
 GCIdMarkAndRestore::GCIdMarkAndRestore() : _gc_id(GCId::create()) {
-  _previous_gc_id = GCId::current(); // will assert that the GC Id is not undefined
+  _previous_gc_id = GCId::current_raw();
   currentNamedthread()->set_gc_id(_gc_id);
 }
 
 GCIdMarkAndRestore::GCIdMarkAndRestore(uint gc_id) : _gc_id(gc_id) {
-  _previous_gc_id = GCId::current(); // will assert that the GC Id is not undefinied
+  _previous_gc_id = GCId::current_raw();
   currentNamedthread()->set_gc_id(_gc_id);
 }
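The switch from current() to current_raw() matters at the outermost nesting level, where no GC id is active yet; current() asserts that the id is defined, while current_raw() is assumed to return the undefined sentinel without asserting. An illustrative nesting:

    // Sketch only.
    {
      GCIdMarkAndRestore outer;    // _previous_gc_id = undefined sentinel
      {
        GCIdMarkAndRestore inner;  // _previous_gc_id = outer's id
      }                            // destructor restores outer's id
    }                              // destructor restores the undefined sentinel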
 
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -433,7 +433,7 @@
     return; // GC is disabled (e.g. JNI GetXXXCritical operation)
   }
 
-  GCIdMark gc_id_mark;
+  GCIdMarkAndRestore gc_id_mark;
 
   const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                           collector_policy()->should_clear_all_soft_refs();
@@ -823,7 +823,7 @@
   assert(_gen_policy->is_concurrent_mark_sweep_policy(), "Unexpected policy type");
   CMSCollector* collector =
     new CMSCollector((ConcurrentMarkSweepGeneration*)_old_gen,
-                     _rem_set->as_CardTableRS(),
+                     _rem_set,
                      _gen_policy->as_concurrent_mark_sweep_policy());
 
   if (collector == NULL || !collector->completed_initialization()) {
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -64,8 +64,8 @@
   Generation* _young_gen;
   Generation* _old_gen;
 
-  // The singleton Gen Remembered Set.
-  GenRemSet* _rem_set;
+  // The singleton CardTable Remembered Set.
+  CardTableRS* _rem_set;
 
   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
@@ -361,9 +361,9 @@
   // collection.
   virtual bool is_maximal_no_gc() const;
 
-  // This function returns the "GenRemSet" object that allows us to scan
+  // This function returns the CardTableRS object that allows us to scan
   // generations in a fully generational heap.
-  GenRemSet* rem_set() { return _rem_set; }
+  CardTableRS* rem_set() { return _rem_set; }
 
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
--- a/hotspot/src/share/vm/gc/shared/genOopClosures.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/genOopClosures.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -157,7 +157,7 @@
   }
  public:
   FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
-    ExtendedOopClosure(cl->_ref_processor), _boundary(boundary),
+    ExtendedOopClosure(cl->ref_processor()), _boundary(boundary),
     _cl(cl) {}
   virtual void do_oop(oop* p);
   virtual void do_oop(narrowOop* p);
--- a/hotspot/src/share/vm/gc/shared/genOopClosures.inline.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/genOopClosures.inline.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -29,7 +29,6 @@
 #include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/genOopClosures.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
 
@@ -43,8 +42,7 @@
   _gen_boundary = _gen->reserved().start();
   // Barrier set for the heap, must be set after heap is initialized
   if (_rs == NULL) {
-    GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
-    _rs = (CardTableRS*)rs;
+    _rs = GenCollectedHeap::heap()->rem_set();
   }
 }
 
--- a/hotspot/src/share/vm/gc/shared/genRemSet.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderData.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/genRemSet.hpp"
-#include "oops/klass.hpp"
-
-// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
-// enumerate ref fields that have been modified (since the last
-// enumeration.)
-
-uintx GenRemSet::max_alignment_constraint() {
-  return CardTableRS::ct_max_alignment_constraint();
-}
-
-class HasAccumulatedModifiedOopsClosure : public KlassClosure {
-  bool _found;
- public:
-  HasAccumulatedModifiedOopsClosure() : _found(false) {}
-  void do_klass(Klass* klass) {
-    if (_found) {
-      return;
-    }
-
-    if (klass->has_accumulated_modified_oops()) {
-      _found = true;
-    }
-  }
-  bool found() {
-    return _found;
-  }
-};
-
-bool KlassRemSet::mod_union_is_clear() {
-  HasAccumulatedModifiedOopsClosure closure;
-  ClassLoaderDataGraph::classes_do(&closure);
-
-  return !closure.found();
-}
-
-
-class ClearKlassModUnionClosure : public KlassClosure {
- public:
-  void do_klass(Klass* klass) {
-    if (klass->has_accumulated_modified_oops()) {
-      klass->clear_accumulated_modified_oops();
-    }
-  }
-};
-
-void KlassRemSet::clear_mod_union() {
-  ClearKlassModUnionClosure closure;
-  ClassLoaderDataGraph::classes_do(&closure);
-}
--- a/hotspot/src/share/vm/gc/shared/genRemSet.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,133 +0,0 @@
-/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_GENREMSET_HPP
-#define SHARE_VM_GC_SHARED_GENREMSET_HPP
-
-#include "oops/oop.hpp"
-
-// A GenRemSet provides ways of iterating over pointers across generations.
-// (This is especially useful for older-to-younger.)
-
-class Generation;
-class BarrierSet;
-class OopsInGenClosure;
-class CardTableRS;
-
-// Helper to remember modified oops in all klasses.
-class KlassRemSet {
-  bool _accumulate_modified_oops;
- public:
-  KlassRemSet() : _accumulate_modified_oops(false) {}
-  void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
-  bool accumulate_modified_oops() { return _accumulate_modified_oops; }
-  bool mod_union_is_clear();
-  void clear_mod_union();
-};
-
-class GenRemSet: public CHeapObj<mtGC> {
-  friend class Generation;
-
-  BarrierSet* _bs;
-  KlassRemSet _klass_rem_set;
-
-public:
-  GenRemSet(BarrierSet * bs) : _bs(bs) {}
-  GenRemSet() : _bs(NULL) {}
-
-  // These are for dynamic downcasts.  Unfortunately that it names the
-  // possible subtypes (but not that they are subtypes!)  Return NULL if
-  // the cast is invalid.
-  virtual CardTableRS* as_CardTableRS() { return NULL; }
-
-  // Return the barrier set associated with "this."
-  BarrierSet* bs() { return _bs; }
-
-  // Set the barrier set.
-  void set_bs(BarrierSet* bs) { _bs = bs; }
-
-  KlassRemSet* klass_rem_set() { return &_klass_rem_set; }
-
-  // Do any (sequential) processing necessary to prepare for (possibly
-  // "parallel", if that arg is true) calls to younger_refs_iterate.
-  virtual void prepare_for_younger_refs_iterate(bool parallel) = 0;
-
-  // Apply the "do_oop" method of "blk" to (exactly) all oop locations
-  //  1) that are in objects allocated in "g" at the time of the last call
-  //     to "save_Marks", and
-  //  2) that point to objects in younger generations.
-  virtual void younger_refs_iterate(Generation* g, OopsInGenClosure* blk, uint n_threads) = 0;
-
-  virtual void younger_refs_in_space_iterate(Space* sp,
-                                             OopsInGenClosure* cl,
-                                             uint n_threads) = 0;
-
-  // This method is used to notify the remembered set that "new_val" has
-  // been written into "field" by the garbage collector.
-  void write_ref_field_gc(void* field, oop new_val);
-protected:
-  virtual void write_ref_field_gc_work(void* field, oop new_val) = 0;
-public:
-
-  // A version of the above suitable for use by parallel collectors.
-  virtual void write_ref_field_gc_par(void* field, oop new_val) = 0;
-
-  // Resize one of the regions covered by the remembered set.
-  virtual void resize_covered_region(MemRegion new_region) = 0;
-
-  // If the rem set imposes any alignment restrictions on boundaries
-  // within the heap, this function tells whether they are met.
-  virtual bool is_aligned(HeapWord* addr) = 0;
-
-  // Returns any alignment constraint that the remembered set imposes upon the
-  // heap.
-  static uintx max_alignment_constraint();
-
-  virtual void verify() = 0;
-
-  // If appropriate, print some information about the remset on "tty".
-  virtual void print() {}
-
-  // Informs the RS that the given memregion contains no references to
-  // the young generation.
-  virtual void clear(MemRegion mr) = 0;
-
-  // Informs the RS that there are no references to the young generation
-  // from old_gen.
-  virtual void clear_into_younger(Generation* old_gen) = 0;
-
-  // Informs the RS that refs in the given "mr" may have changed
-  // arbitrarily, and therefore may contain old-to-young pointers.
-  // If "whole heap" is true, then this invalidation is part of an
-  // invalidation of the whole heap, which an implementation might
-  // handle differently than that of a sub-part of the heap.
-  virtual void invalidate(MemRegion mr, bool whole_heap = false) = 0;
-
-  // Informs the RS that refs in this generation
-  // may have changed arbitrarily, and therefore may contain
-  // old-to-young pointers in arbitrary locations.
-  virtual void invalidate_or_clear(Generation* old_gen) = 0;
-};
-
-#endif // SHARE_VM_GC_SHARED_GENREMSET_HPP
--- a/hotspot/src/share/vm/gc/shared/generation.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/generation.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -293,7 +293,7 @@
 void Generation::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl,
                                                uint n_threads) {
-  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
+  CardTableRS* rs = GenCollectedHeap::heap()->rem_set();
   rs->younger_refs_in_space_iterate(sp, cl, n_threads);
 }
 
--- a/hotspot/src/share/vm/gc/shared/generation.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/generation.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -27,7 +27,6 @@
 
 #include "gc/shared/collectorCounters.hpp"
 #include "gc/shared/referenceProcessor.hpp"
-#include "gc/shared/watermark.hpp"
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/universe.hpp"
@@ -67,7 +66,6 @@
 class ScanClosure;
 class FastScanClosure;
 class GenCollectedHeap;
-class GenRemSet;
 class GCStats;
 
 // A "ScratchBlock" represents a block of memory in one generation usable by
--- a/hotspot/src/share/vm/gc/shared/generationSpec.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/generationSpec.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/tenuredGeneration.hpp"
-#include "gc/shared/genRemSet.hpp"
+#include "gc/shared/cardTableRS.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/filemap.hpp"
@@ -36,7 +36,7 @@
 #include "gc/cms/parNewGeneration.hpp"
 #endif // INCLUDE_ALL_GCS
 
-Generation* GenerationSpec::init(ReservedSpace rs, GenRemSet* remset) {
+Generation* GenerationSpec::init(ReservedSpace rs, CardTableRS* remset) {
   switch (name()) {
     case Generation::DefNew:
       return new DefNewGeneration(rs, init_size());
@@ -50,8 +50,7 @@
 
     case Generation::ConcurrentMarkSweep: {
       assert(UseConcMarkSweepGC, "UseConcMarkSweepGC should be set");
-      CardTableRS* ctrs = remset->as_CardTableRS();
-      if (ctrs == NULL) {
+      if (remset == NULL) {
         vm_exit_during_initialization("Rem set incompatibility.");
       }
       // Otherwise
@@ -60,7 +59,7 @@
 
       ConcurrentMarkSweepGeneration* g = NULL;
       g = new ConcurrentMarkSweepGeneration(rs,
-                 init_size(), ctrs, UseCMSAdaptiveFreeLists,
+                 init_size(), remset, UseCMSAdaptiveFreeLists,
                  (FreeBlockDictionary<FreeChunk>::DictionaryChoice)CMSDictionaryChoice);
 
       g->initialize_performance_counters();
--- a/hotspot/src/share/vm/gc/shared/generationSpec.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/generationSpec.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -45,7 +45,7 @@
     _max_size(align_size_up(max_size, alignment))
   { }
 
-  Generation* init(ReservedSpace rs, GenRemSet* remset);
+  Generation* init(ReservedSpace rs, CardTableRS* remset);
 
   // Accessors
   Generation::Name name()        const { return _name; }
--- a/hotspot/src/share/vm/gc/shared/space.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/space.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -529,8 +529,7 @@
 
 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
   if (is_empty()) return;
-  WaterMark bm = bottom_mark();
-  object_iterate_from(bm, blk);
+  object_iterate_from(bottom(), blk);
 }
 
 // For a ContiguousSpace object_iterate() and safe_object_iterate()
@@ -539,12 +538,10 @@
   object_iterate(blk);
 }
 
-void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
-  assert(mark.space() == this, "Mark does not match space");
-  HeapWord* p = mark.point();
-  while (p < top()) {
-    blk->do_object(oop(p));
-    p += oop(p)->size();
+void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) {
+  while (mark < top()) {
+    blk->do_object(oop(mark));
+    mark += oop(mark)->size();
   }
 }
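[With WaterMark gone, object_iterate_from takes a bare HeapWord* and walks objects by their own sizes up to top(). A self-contained sketch of the same cursor walk over a contiguous region of size-prefixed records (a toy model, not HotSpot's oop machinery):

#include <cstdio>

typedef int HeapWordModel;                   // one "word" of this model heap
struct Obj { int size_in_words; };           // every object starts with its size

// Walk [mark, top): each record advances the cursor by its own size, the
// same shape as oop(mark)->size() in the new object_iterate_from above.
void object_iterate_from(HeapWordModel* mark, HeapWordModel* top,
                         void (*do_object)(Obj*)) {
  while (mark < top) {
    Obj* obj = (Obj*)mark;
    do_object(obj);
    mark += obj->size_in_words;
  }
}

void print_obj(Obj* o) { printf("object of %d words\n", o->size_in_words); }

int main() {
  HeapWordModel heap[5] = {2, 0, 3, 0, 0};   // a 2-word object, then a 3-word one
  object_iterate_from(heap, heap + 5, print_obj);
  return 0;
}
]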
 
--- a/hotspot/src/share/vm/gc/shared/space.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/gc/shared/space.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -27,7 +27,6 @@
 
 #include "gc/shared/blockOffsetTable.hpp"
 #include "gc/shared/cardTableModRefBS.hpp"
-#include "gc/shared/watermark.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
@@ -48,7 +47,6 @@
 class Generation;
 class CompactibleSpace;
 class BlockOffsetTable;
-class GenRemSet;
 class CardTableRS;
 class DirtyCardToOopClosure;
 
@@ -541,9 +539,6 @@
   void set_saved_mark()            { _saved_mark_word = top();    }
   void reset_saved_mark()          { _saved_mark_word = bottom(); }
 
-  WaterMark bottom_mark()     { return WaterMark(this, bottom()); }
-  WaterMark top_mark()        { return WaterMark(this, top()); }
-  WaterMark saved_mark()      { return WaterMark(this, saved_mark_word()); }
   bool saved_mark_at_top() const { return saved_mark_word() == top(); }
 
   // In debug mode mangle (write it with a particular bit
@@ -649,7 +644,7 @@
   // Same as object_iterate, but starting from "mark", which is required
   // to denote the start of an object.  Objects allocated by
   // applications of the closure *are* included in the iteration.
-  virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
+  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);
 
   // Very inefficient implementation.
   virtual HeapWord* block_start_const(const void* p) const;
--- a/hotspot/src/share/vm/gc/shared/watermark.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_WATERMARK_HPP
-#define SHARE_VM_GC_SHARED_WATERMARK_HPP
-
-#include "memory/allocation.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// A water mark points into a space and is used during GC to keep track of
-// progress.
-
-class Space;
-
-class WaterMark VALUE_OBJ_CLASS_SPEC {
-  friend class VMStructs;
- private:
-  HeapWord* _point;
-  Space*    _space;
- public:
-  // Accessors
-  Space* space() const        { return _space;  }
-  void set_space(Space* s)    { _space = s;     }
-  HeapWord* point() const     { return _point;  }
-  void set_point(HeapWord* p) { _point = p;     }
-
-  // Constructors
-  WaterMark(Space* s, HeapWord* p) : _space(s), _point(p) {};
-  WaterMark() : _space(NULL), _point(NULL) {};
-};
-
-inline bool operator==(const WaterMark& x, const WaterMark& y) {
-  return (x.point() == y.point()) && (x.space() == y.space());
-}
-
-inline bool operator!=(const WaterMark& x, const WaterMark& y) {
-  return !(x == y);
-}
-
-#endif // SHARE_VM_GC_SHARED_WATERMARK_HPP
--- a/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeInterpreter.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -2791,7 +2791,7 @@
                       (int)continuation_bci, p2i(THREAD));
       }
       // for AbortVMOnException flag
-      NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
+      Exceptions::debug_check_abort(except_oop);
 
       // Update profiling data.
       BI_PROFILE_ALIGN_TO_CURRENT_BCI();
@@ -2807,7 +2807,8 @@
                     p2i(THREAD));
     }
     // for AbortVMOnException flag
-    NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
+    Exceptions::debug_check_abort(except_oop);
+
     // No handler in this activation, unwind and try again
     THREAD->set_pending_exception(except_oop(), NULL, 0);
     goto handle_return;
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -458,7 +458,7 @@
 //       // warning("performance bug: should not call runtime if method has no exception handlers");
 //     }
     // for AbortVMOnException flag
-    NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
+    Exceptions::debug_check_abort(h_exception);
 
     // exception handler lookup
     KlassHandle h_klass(THREAD, h_exception->klass());
--- a/hotspot/src/share/vm/memory/iterator.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/memory/iterator.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,10 +51,18 @@
 // This is needed by the GC and is extracted to a separate type to not
 // pollute the OopClosure interface.
 class ExtendedOopClosure : public OopClosure {
- public:
+ private:
   ReferenceProcessor* _ref_processor;
+
+ protected:
   ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
-  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }
+  ExtendedOopClosure() : _ref_processor(NULL) { }
+  ~ExtendedOopClosure() { }
+
+  void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }
+
+ public:
+  ReferenceProcessor* ref_processor() const { return _ref_processor; }
 
   // If the do_metadata functions return "true",
   // we invoke the following when running oop_iterate():
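[The ExtendedOopClosure hunk above is a straight encapsulation pass: _ref_processor becomes private behind ref_processor()/set_ref_processor_internal(), and the base gains a protected non-virtual destructor so instances can never be deleted through a base-class pointer. That is why instanceRefKlass.inline.hpp below (and genOopClosures.hpp above) switch from the field to the accessor. A generic sketch of the idiom, with illustrative names:

class ClosureBase {
 private:
  int* _state;                              // was a public field; now private
 protected:
  explicit ClosureBase(int* s) : _state(s) {}
  ~ClosureBase() {}                         // protected + non-virtual: no polymorphic delete
  void set_state_internal(int* s) { _state = s; }
 public:
  int* state() const { return _state; }     // read access for external callers
};

class CountingClosure : public ClosureBase {
 public:
  explicit CountingClosure(int* s) : ClosureBase(s) {}
};

// ClosureBase* p = new CountingClosure(&x); delete p;   // would not compile:
// the protected destructor forbids deletion through the base class.

int main() {
  int s = 7;
  CountingClosure c(&s);
  return *c.state() == 7 ? 0 : 1;
}
]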
--- a/hotspot/src/share/vm/memory/universe.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/memory/universe.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -35,7 +35,6 @@
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genRemSet.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/space.hpp"
 #include "interpreter/interpreter.hpp"
--- a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -43,7 +43,7 @@
 
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
   T heap_oop = oopDesc::load_heap_oop(referent_addr);
-  ReferenceProcessor* rp = closure->_ref_processor;
+  ReferenceProcessor* rp = closure->ref_processor();
   if (!oopDesc::is_null(heap_oop)) {
     oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
     if (!referent->is_gc_marked() && (rp != NULL) &&
--- a/hotspot/src/share/vm/oops/method.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -72,17 +72,14 @@
                                           sizes,
                                           method_type,
                                           CHECK_NULL);
-
   int size = Method::size(access_flags.is_native());
-
-  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags, size);
+  return new (loader_data, size, false, MetaspaceObj::MethodType, THREAD) Method(cm, access_flags);
 }
 
-Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
+Method::Method(ConstMethod* xconst, AccessFlags access_flags) {
   No_Safepoint_Verifier no_safepoint;
   set_constMethod(xconst);
   set_access_flags(access_flags);
-  set_method_size(size);
 #ifdef CC_INTERP
   set_result_index(T_VOID);
 #endif
@@ -1227,7 +1224,6 @@
                                       m->method_type(),
                                       CHECK_(methodHandle()));
   methodHandle newm (THREAD, newm_oop);
-  int new_method_size = newm->method_size();
 
   // Create a shallow copy of Method part, but be careful to preserve the new ConstMethod*
   ConstMethod* newcm = newm->constMethod();
@@ -1242,7 +1238,6 @@
   newm->set_constMethod(newcm);
   newm->constMethod()->set_code_size(new_code_length);
   newm->constMethod()->set_constMethod_size(new_const_method_size);
-  newm->set_method_size(new_method_size);
   assert(newm->code_size() == new_code_length, "check");
   assert(newm->method_parameters_length() == method_parameters_len, "check");
   assert(newm->checked_exceptions_length() == checked_exceptions_len, "check");
--- a/hotspot/src/share/vm/oops/method.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -71,7 +71,6 @@
 #ifdef CC_INTERP
   int               _result_index;               // C++ interpreter needs for converting results to/from stack
 #endif
-  u2                _method_size;                // size of this object
   u2                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
 
   // Flags
@@ -106,7 +105,7 @@
   volatile address           _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
 
   // Constructor
-  Method(ConstMethod* xconst, AccessFlags access_flags, int size);
+  Method(ConstMethod* xconst, AccessFlags access_flags);
  public:
 
   static Method* allocate(ClassLoaderData* loader_data,
@@ -241,12 +240,8 @@
   // code size
   int code_size() const                  { return constMethod()->code_size(); }
 
-  // method size
-  int method_size() const                        { return _method_size; }
-  void set_method_size(int size) {
-    assert(0 <= size && size < (1 << 16), "invalid method size");
-    _method_size = size;
-  }
+  // method size in words
+  int method_size() const                { return sizeof(Method)/wordSize + (is_native() ? 2 : 0); }
 
   // constant pool for Klass* holding this method
   ConstantPool* constants() const              { return constMethod()->constants(); }
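[With the cached _method_size field removed, the size is recomputed on demand: the fixed part of Method in words, plus two extra words when the method is native (the slots appended to native methods). Note the parentheses around the conditional in the hunk above; ?: binds more loosely than +, so without them the whole sum would become the condition. A worked sketch of the word arithmetic, with an assumed struct size for illustration:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t wordSize = sizeof(void*);    // 8 on LP64
  const size_t sizeof_Method_model = 88;    // assumed fixed-part size, illustration only
  bool is_native = true;

  // Equivalent of: sizeof(Method)/wordSize + (is_native() ? 2 : 0)
  size_t words = sizeof_Method_model / wordSize + (is_native ? 2 : 0);
  printf("method size: %zu words\n", words); // 11 + 2 = 13 with these numbers
  return 0;
}
]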
--- a/hotspot/src/share/vm/opto/runtime.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/opto/runtime.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -1235,7 +1235,7 @@
   }
 
   // for AbortVMOnException flag
-  NOT_PRODUCT(Exceptions::debug_check_abort(exception));
+  Exceptions::debug_check_abort(exception);
 
 #ifdef ASSERT
   if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
--- a/hotspot/src/share/vm/precompiled/precompiled.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/precompiled/precompiled.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -101,7 +101,6 @@
 # include "gc/shared/gcStats.hpp"
 # include "gc/shared/gcUtil.hpp"
 # include "gc/shared/genCollectedHeap.hpp"
-# include "gc/shared/genRemSet.hpp"
 # include "gc/shared/generation.hpp"
 # include "gc/shared/generationCounters.hpp"
 # include "gc/shared/modRefBarrierSet.hpp"
@@ -111,7 +110,6 @@
 # include "gc/shared/spaceDecorator.hpp"
 # include "gc/shared/taskqueue.hpp"
 # include "gc/shared/threadLocalAllocBuffer.hpp"
-# include "gc/shared/watermark.hpp"
 # include "gc/shared/workgroup.hpp"
 # include "interpreter/abstractInterpreter.hpp"
 # include "interpreter/bytecode.hpp"
--- a/hotspot/src/share/vm/prims/jni.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -3874,6 +3874,7 @@
 void TestCodeCacheRemSet_test();
 void FreeRegionList_test();
 void test_memset_with_concurrent_readers();
+void TestPredictions_test();
 #endif
 
 void execute_internal_vm_tests() {
@@ -3916,6 +3917,7 @@
       run_unit_test(FreeRegionList_test());
     }
     run_unit_test(test_memset_with_concurrent_readers());
+    run_unit_test(TestPredictions_test());
 #endif
     tty->print_cr("All internal VM tests passed");
   }
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -2436,20 +2436,6 @@
     MarkSweepAlwaysCompactCount = 1;  // Move objects every gc.
   }
 
-  if (UseParallelOldGC && ParallelOldGCSplitALot) {
-    // Settings to encourage splitting.
-    if (!FLAG_IS_CMDLINE(NewRatio)) {
-      if (FLAG_SET_CMDLINE(uintx, NewRatio, 2) != Flag::SUCCESS) {
-        status = false;
-      }
-    }
-    if (!FLAG_IS_CMDLINE(ScavengeBeforeFullGC)) {
-      if (FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false) != Flag::SUCCESS) {
-        status = false;
-      }
-    }
-  }
-
   if (!(UseParallelGC || UseParallelOldGC) && FLAG_IS_DEFAULT(ScavengeBeforeFullGC)) {
     FLAG_SET_DEFAULT(ScavengeBeforeFullGC, false);
   }
@@ -3350,19 +3336,6 @@
         return JNI_EINVAL;
       }
 #endif
-    } else if (match_option(option, "-XX:MaxDirectMemorySize=", &tail)) {
-      julong max_direct_memory_size = 0;
-      ArgsRange errcode = parse_memory_size(tail, &max_direct_memory_size, 0);
-      if (errcode != arg_in_range) {
-        jio_fprintf(defaultStream::error_stream(),
-                    "Invalid maximum direct memory size: %s\n",
-                    option->optionString);
-        describe_range_error(errcode);
-        return JNI_EINVAL;
-      }
-      if (FLAG_SET_CMDLINE(size_t, MaxDirectMemorySize, max_direct_memory_size) != Flag::SUCCESS) {
-        return JNI_EINVAL;
-      }
 #if !INCLUDE_MANAGEMENT
     } else if (match_option(option, "-XX:+ManagementServer")) {
         jio_fprintf(defaultStream::error_stream(),
@@ -3990,16 +3963,8 @@
     return code;
   }
 
-  // Now set global settings from the vm_option file, giving an error if
-  // it has VMOptionsFile in it
-  code = match_special_option_and_act(vm_options_file_args->get(), flags_file,
-                                      NULL, NULL, NULL);
-  if (code != JNI_OK) {
-    return code;
-  }
-
   if (vm_options_file_args->get()->nOptions < 1) {
-    return 0;
+    return JNI_OK;
   }
 
   return args_out->insert(args, vm_options_file_args->get(),
@@ -4034,17 +3999,29 @@
         // The caller accepts -XX:VMOptionsFile
         if (*vm_options_file != NULL) {
           jio_fprintf(defaultStream::error_stream(),
-                      "Only one VM Options file is supported "
-                      "on the command line\n");
+                      "The VM Options file can only be specified once and "
+                      "only on the command line.\n");
           return JNI_EINVAL;
         }
 
         *vm_options_file = (char *) tail;
         vm_options_file_pos = index;  // save position of -XX:VMOptionsFile
-        if (*vm_options_file == NULL) {
-          jio_fprintf(defaultStream::error_stream(),
-                      "Cannot copy vm_options_file name.\n");
-          return JNI_ENOMEM;
+        // If there's a VMOptionsFile, parse that (also can set flags_file)
+        jint code = insert_vm_options_file(args, flags_file, vm_options_file,
+                                           vm_options_file_pos,
+                                           vm_options_file_args, args_out);
+        if (code != JNI_OK) {
+          return code;
+        }
+        if (args_out->is_set()) {
+          // The VMOptions file inserted some options so switch 'args'
+          // to the new set of options, and continue processing which
+          // preserves "last option wins" semantics.
+          args = args_out->get();
+          // The first option from the VMOptionsFile replaces the
+          // current option.  So we back track to process the
+          // replacement option.
+          index--;
         }
       } else {
         jio_fprintf(defaultStream::error_stream(),
@@ -4104,12 +4081,6 @@
     }
 #endif
   }
-
-  // If there's a VMOptionsFile, parse that (also can set flags_file)
-  if ((vm_options_file != NULL) && (*vm_options_file != NULL)) {
-    return insert_vm_options_file(args, flags_file, vm_options_file,
-                                  vm_options_file_pos, vm_options_file_args, args_out);
-  }
   return JNI_OK;
 }
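[The arguments.cpp rework above parses -XX:VMOptionsFile at the point where it is seen, splices the file's options into the argument stream, and backs the scan index up one so processing resumes at the first spliced option; that backtrack is what preserves last-option-wins semantics. A much-simplified sketch of the splice-and-backtrack loop, where a hypothetical "@file" token stands in for -XX:VMOptionsFile and nothing resembles the real Arguments plumbing:

#include <cstdio>
#include <string>
#include <vector>
using std::string; using std::vector;

// Pretend each "@file" option expands to the options it names.
vector<string> expand_file(const string&) {
  return vector<string>{"-Xmx2g", "-Xint"};
}

int main() {
  vector<string> args = {"-Xms1g", "@opts", "-Xmx4g"};
  for (size_t index = 0; index < args.size(); index++) {
    if (args[index][0] == '@') {
      vector<string> inserted = expand_file(args[index]);
      args.erase(args.begin() + index);
      args.insert(args.begin() + index, inserted.begin(), inserted.end());
      index--;   // back up: reprocess from the first spliced option
      continue;
    }
    printf("process %s\n", args[index].c_str());
  }
  // -Xmx4g is processed after the file's -Xmx2g, so the command line wins.
  return 0;
}
]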
 
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -223,7 +223,7 @@
 #define EMIT_CONSTRAINT_CHECK(func, type)                               , func, CommandLineFlagConstraint::type
 
 // the "name" argument must be a string literal
-#define INITIAL_CONSTRAINTS_SIZE 40
+#define INITIAL_CONSTRAINTS_SIZE 45
 GrowableArray<CommandLineFlagConstraint*>* CommandLineFlagConstraintList::_constraints = NULL;
 CommandLineFlagConstraint::ConstraintType CommandLineFlagConstraintList::_validating_type = CommandLineFlagConstraint::AtParse;
 
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintList.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -54,9 +54,9 @@
   enum ConstraintType {
     // Will be validated during argument processing (Arguments::parse_argument).
     AtParse         = 0,
-    // Will be validated by CommandLineFlagConstraintList::check_constraints(AfterErgo).
+    // Will be validated inside Threads::create_vm(), right after Arguments::apply_ergo().
     AfterErgo       = 1,
-    // Will be validated by CommandLineFlagConstraintList::check_constraints(AfterMemoryInit).
+    // Will be validated inside universe_init(), right after Metaspace::global_initialize().
     AfterMemoryInit = 2
   };
 
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -27,6 +27,7 @@
 #include "runtime/commandLineFlagConstraintsRuntime.hpp"
 #include "runtime/commandLineFlagRangeList.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/task.hpp"
 #include "utilities/defaultStream.hpp"
 
 Flag::Error ObjectAlignmentInBytesConstraintFunc(intx value, bool verbose) {
@@ -41,7 +42,7 @@
   if (value >= (intx)os::vm_page_size()) {
     CommandLineError::print(verbose,
                             "ObjectAlignmentInBytes (" INTX_FORMAT ") must be "
-                            "less than page size " INTX_FORMAT "\n",
+                            "less than page size (" INTX_FORMAT ")\n",
                             value, (intx)os::vm_page_size());
     return Flag::VIOLATES_CONSTRAINT;
   }
@@ -51,7 +52,7 @@
 // Need to enforce the padding not to break the existing field alignments.
 // It is sufficient to check against the largest type size.
 Flag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose) {
-  if ((value != 0) && ((value % BytesPerLong) != 0)) {
+  if ((value % BytesPerLong) != 0) {
     CommandLineError::print(verbose,
                             "ContendedPaddingWidth (" INTX_FORMAT ") must be "
                             "a multiple of %d\n",
@@ -61,3 +62,71 @@
     return Flag::SUCCESS;
   }
 }
+
+Flag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose) {
+  if (value > BiasedLockingBulkRevokeThreshold) {
+    CommandLineError::print(verbose,
+                            "BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ") must be "
+                            "less than or equal to BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")\n",
+                            value, BiasedLockingBulkRevokeThreshold);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose) {
+  if ((value % PeriodicTask::interval_gran) != 0) {
+    CommandLineError::print(verbose,
+                            "BiasedLockingStartupDelay (" INTX_FORMAT ") must be "
+                            "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
+                            value, PeriodicTask::interval_gran);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose) {
+  if (value < BiasedLockingBulkRebiasThreshold) {
+    CommandLineError::print(verbose,
+                            "BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ") must be "
+                            "greater than or equal to BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")\n",
+                            value, BiasedLockingBulkRebiasThreshold);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else if ((double)value/(double)BiasedLockingDecayTime > 0.1) {
+    CommandLineError::print(verbose,
+                            "The ratio of BiasedLockingBulkRevokeThreshold (" INTX_FORMAT ")"
+                            " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
+                            "less than or equal to 0.1\n",
+                            value, BiasedLockingDecayTime);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose) {
+  if (BiasedLockingBulkRebiasThreshold/(double)value > 0.1) {
+    CommandLineError::print(verbose,
+                            "The ratio of BiasedLockingBulkRebiasThreshold (" INTX_FORMAT ")"
+                            " to BiasedLockingDecayTime (" INTX_FORMAT ") must be "
+                            "less than or equal to 0.1\n",
+                            BiasedLockingBulkRebiasThreshold, value);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
+
+Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose) {
+  if ((value % PeriodicTask::interval_gran) != 0) {
+    CommandLineError::print(verbose,
+                            "PerfDataSamplingInterval (" INTX_FORMAT ") must be "
+                            "evenly divisible by PeriodicTask::interval_gran (" INTX_FORMAT ")\n",
+                            value, PeriodicTask::interval_gran);
+    return Flag::VIOLATES_CONSTRAINT;
+  } else {
+    return Flag::SUCCESS;
+  }
+}
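[Each constraint function added above follows one shape: take the proposed value and a verbose flag, print a diagnostic only when verbose, and return a status the flag machinery turns into a startup error. A minimal standalone version of that shape (illustrative names; interval_gran stands in for PeriodicTask::interval_gran):

#include <cstdio>

enum FlagError { FLAG_SUCCESS, FLAG_VIOLATES_CONSTRAINT };

const long interval_gran = 10;   // stand-in for PeriodicTask::interval_gran

FlagError PerfDataSamplingIntervalModel(long value, bool verbose) {
  if ((value % interval_gran) != 0) {
    if (verbose) {
      fprintf(stderr,
              "PerfDataSamplingInterval (%ld) must be evenly divisible "
              "by interval_gran (%ld)\n", value, interval_gran);
    }
    return FLAG_VIOLATES_CONSTRAINT;
  }
  return FLAG_SUCCESS;
}

int main() {
  return PerfDataSamplingIntervalModel(55, true) == FLAG_SUCCESS ? 0 : 1;
}

Interdependent flags (the BiasedLocking pair above) get the AfterErgo phase so both values are final before either constraint runs.]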
--- a/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/commandLineFlagConstraintsRuntime.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -38,4 +38,11 @@
 
 Flag::Error ContendedPaddingWidthConstraintFunc(intx value, bool verbose);
 
+Flag::Error BiasedLockingBulkRebiasThresholdFunc(intx value, bool verbose);
+Flag::Error BiasedLockingStartupDelayFunc(intx value, bool verbose);
+Flag::Error BiasedLockingBulkRevokeThresholdFunc(intx value, bool verbose);
+Flag::Error BiasedLockingDecayTimeFunc(intx value, bool verbose);
+
+Flag::Error PerfDataSamplingIntervalFunc(intx value, bool verbose);
+
 #endif /* SHARE_VM_RUNTIME_COMMANDLINEFLAGCONSTRAINTSRUNTIME_HPP */
--- a/hotspot/src/share/vm/runtime/commandLineFlagRangeList.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/commandLineFlagRangeList.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -29,6 +29,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/commandLineFlagRangeList.hpp"
 #include "runtime/os.hpp"
+#include "runtime/task.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/macros.hpp"
 
@@ -278,7 +279,7 @@
 // Generate func argument to pass into emit_range_xxx functions
 #define EMIT_RANGE_CHECK(a, b)                               , a, b
 
-#define INITIAL_RANGES_SIZE 165
+#define INITIAL_RANGES_SIZE 204
 GrowableArray<CommandLineFlagRange*>* CommandLineFlagRangeList::_ranges = NULL;
 
 // Check the ranges of all flags that have them
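[The range(...) and constraint(...) clauses being added throughout globals.hpp below sit inside one large macro table that is expanded several ways: once to define the flags, and again to build the range and constraint lists sized by INITIAL_RANGES_SIZE and INITIAL_CONSTRAINTS_SIZE above. A toy X-macro showing the mechanism (not the real RUNTIME_FLAGS table):

#include <cstdio>

// One table, expanded twice with different definitions of FLAG.
#define MY_FLAGS(FLAG)                                               \
  FLAG(int, StackPages, 8,  1, 50)                                   \
  FLAG(int, RetryCount, 5,  0, 100)

// Expansion 1: define the variables with their defaults.
#define DEFINE_FLAG(type, name, def, lo, hi) type name = def;
MY_FLAGS(DEFINE_FLAG)

// Expansion 2: emit a range check for every entry.
#define CHECK_RANGE(type, name, def, lo, hi)                         \
  if (name < lo || name > hi)                                        \
    printf("%s (%d) outside [%d, %d]\n", #name, name, lo, hi);

int main() {
  StackPages = 99;             // out of range on purpose
  MY_FLAGS(CHECK_RANGE)
  return 0;
}
]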
--- a/hotspot/src/share/vm/runtime/globals.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -894,9 +894,11 @@
   /* typically, at most a few retries are needed                    */      \
   product(intx, SuspendRetryCount, 50,                                      \
           "Maximum retry count for an external suspend request")            \
+          range(0, max_intx)                                                \
                                                                             \
   product(intx, SuspendRetryDelay, 5,                                       \
           "Milliseconds to delay per retry (* current_retry_count)")        \
+          range(0, max_intx)                                                \
                                                                             \
   product(bool, AssertOnSuspendWaitFailure, false,                          \
           "Assert/Guarantee on external suspend wait failure")              \
@@ -1342,6 +1344,7 @@
           "Maximum allowable local JNI handle capacity to "                 \
           "EnsureLocalCapacity() and PushLocalFrame(), "                    \
           "where <= 0 is unlimited, default: 65536")                        \
+          range(min_intx, max_intx)                                         \
                                                                             \
   product(bool, EagerXrunInit, false,                                       \
           "Eagerly initialize -Xrun libraries; allows startup profiling, "  \
@@ -1377,7 +1380,7 @@
   product(intx, ContendedPaddingWidth, 128,                                 \
           "How many bytes to pad the fields/classes marked @Contended with")\
           range(0, 8192)                                                    \
-          constraint(ContendedPaddingWidthConstraintFunc,AtParse)           \
+          constraint(ContendedPaddingWidthConstraintFunc,AfterErgo)         \
                                                                             \
   product(bool, EnableContended, true,                                      \
           "Enable @Contended annotation support")                           \
@@ -1390,6 +1393,8 @@
                                                                             \
   product(intx, BiasedLockingStartupDelay, 4000,                            \
           "Number of milliseconds to wait before enabling biased locking")  \
+          range(0, (intx)(max_jint-(max_jint%PeriodicTask::interval_gran))) \
+          constraint(BiasedLockingStartupDelayFunc,AfterErgo)               \
                                                                             \
   diagnostic(bool, PrintBiasedLockingStatistics, false,                     \
           "Print statistics of biased locking in JVM")                      \
@@ -1397,14 +1402,20 @@
   product(intx, BiasedLockingBulkRebiasThreshold, 20,                       \
           "Threshold of number of revocations per type to try to "          \
           "rebias all objects in the heap of that type")                    \
+          range(0, max_intx)                                                \
+          constraint(BiasedLockingBulkRebiasThresholdFunc,AfterErgo)        \
                                                                             \
   product(intx, BiasedLockingBulkRevokeThreshold, 40,                       \
           "Threshold of number of revocations per type to permanently "     \
           "revoke biases of all objects in the heap of that type")          \
+          range(0, max_intx)                                                \
+          constraint(BiasedLockingBulkRevokeThresholdFunc,AfterErgo)        \
                                                                             \
   product(intx, BiasedLockingDecayTime, 25000,                              \
           "Decay time (in milliseconds) to re-enable bulk rebiasing of a "  \
           "type after previous bulk rebias")                                \
+          range(500, max_intx)                                              \
+          constraint(BiasedLockingDecayTimeFunc,AfterErgo)                  \
                                                                             \
   /* tracing */                                                             \
                                                                             \
@@ -1429,8 +1440,9 @@
   product(bool, StressLdcRewrite, false,                                    \
           "Force ldc -> ldc_w rewrite during RedefineClasses")              \
                                                                             \
-  product(intx, TraceRedefineClasses, 0,                                    \
+  product(uintx, TraceRedefineClasses, 0,                                   \
           "Trace level for JVMTI RedefineClasses")                          \
+          range(0, 0xFFFFFFFF)                                              \
                                                                             \
   /* change to false by default sometime after Mustang */                   \
   product(bool, VerifyMergedCPBytecodes, true,                              \
@@ -1567,14 +1579,6 @@
   product(bool, TraceDynamicGCThreads, false,                               \
           "Trace the dynamic GC thread usage")                              \
                                                                             \
-  develop(bool, ParallelOldGCSplitALot, false,                              \
-          "Provoke splitting (copying data from a young gen space to "      \
-          "multiple destination spaces)")                                   \
-                                                                            \
-  develop(uintx, ParallelOldGCSplitInterval, 3,                             \
-          "How often to provoke splitting a young gen space")               \
-          range(0, max_uintx)                                               \
-                                                                            \
   product(uint, ConcGCThreads, 0,                                           \
           "Number of threads concurrent gc will use")                       \
           constraint(ConcGCThreadsConstraintFunc,AfterErgo)                 \
@@ -1593,9 +1597,6 @@
   product(bool, ScavengeBeforeFullGC, true,                                 \
           "Scavenge youngest generation before each full GC.")              \
                                                                             \
-  develop(bool, ScavengeWithObjectsInToSpace, false,                        \
-          "Allow scavenges to occur when to-space contains objects")        \
-                                                                            \
   product(bool, UseConcMarkSweepGC, false,                                  \
           "Use Concurrent Mark-Sweep GC in the old generation")             \
                                                                             \
@@ -2179,6 +2180,7 @@
                                                                             \
   product_pd(uint64_t, MaxRAM,                                              \
           "Real memory size (in bytes) used to set maximum heap size")      \
+          range(0, 0XFFFFFFFFFFFFFFFF)                                      \
                                                                             \
   product(size_t, ErgoHeapSizeLimit, 0,                                     \
           "Maximum ergonomically set heap size (in bytes); zero means use " \
@@ -2237,12 +2239,6 @@
           "Policy for changing generation size for throughput goals")       \
           range(0, 1)                                                       \
                                                                             \
-  develop(bool, PSAdjustTenuredGenForMinorPause, false,                     \
-          "Adjust tenured generation to achieve a minor pause goal")        \
-                                                                            \
-  develop(bool, PSAdjustYoungGenForMajorPause, false,                       \
-          "Adjust young generation to achieve a major pause goal")          \
-                                                                            \
   product(uintx, AdaptiveSizePolicyInitializingSteps, 20,                   \
           "Number of steps where heuristics is used before data is used")   \
                                                                             \
@@ -2687,10 +2683,13 @@
   product(intx, PrintSafepointStatisticsCount, 300,                         \
           "Total number of safepoint statistics collected "                 \
           "before printing them out")                                       \
+          range(1, max_intx)                                                \
                                                                             \
   product(intx, PrintSafepointStatisticsTimeout,  -1,                       \
           "Print safepoint statistics only when safepoint takes "           \
           "more than PrintSafepointSatisticsTimeout in millis")             \
+  LP64_ONLY(range(-1, max_intx/MICROUNITS))                                 \
+  NOT_LP64(range(-1, max_intx))                                             \
                                                                             \
   product(bool, TraceSafepointCleanupTime, false,                           \
           "Print the break down of clean up tasks performed during "        \
@@ -2740,6 +2739,7 @@
   diagnostic(intx, MinPassesBeforeFlush, 10,                                \
           "Minimum number of sweeper passes before an nmethod "             \
           "can be flushed")                                                 \
+          range(0, max_intx)                                                \
                                                                             \
   product(bool, UseCodeAging, true,                                         \
           "Insert counter to detect warm methods")                          \
@@ -2818,11 +2818,11 @@
           "standard exit from VM if bytecode verify error "                 \
           "(only in debug mode)")                                           \
                                                                             \
-  notproduct(ccstr, AbortVMOnException, NULL,                               \
+  diagnostic(ccstr, AbortVMOnException, NULL,                               \
           "Call fatal if this exception is thrown.  Example: "              \
           "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
                                                                             \
-  notproduct(ccstr, AbortVMOnExceptionMessage, NULL,                        \
+  diagnostic(ccstr, AbortVMOnExceptionMessage, NULL,                        \
           "Call fatal if the exception pointed by AbortVMOnException "      \
           "has this message")                                               \
                                                                             \
@@ -3116,21 +3116,29 @@
   product(intx, SelfDestructTimer, 0,                                       \
           "Will cause VM to terminate after a given time (in minutes) "     \
           "(0 means off)")                                                  \
+          range(0, max_intx)                                                \
                                                                             \
   product(intx, MaxJavaStackTraceDepth, 1024,                               \
           "The maximum number of lines in the stack trace for Java "        \
           "exceptions (0 means all)")                                       \
-                                                                            \
+          range(0, max_jint/2)                                              \
+                                                                            \
+  /* notice: the max range value here is max_jint, not max_intx  */         \
+  /* because of overflow issue                                   */         \
   NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000,          \
           "Guarantee a safepoint (at least) every so many milliseconds "    \
           "(0 means none)"))                                                \
+  NOT_EMBEDDED(range(0, max_jint))                                          \
                                                                             \
   EMBEDDED_ONLY(product(intx, GuaranteedSafepointInterval, 0,               \
           "Guarantee a safepoint (at least) every so many milliseconds "    \
           "(0 means none)"))                                                \
+  EMBEDDED_ONLY(range(0, max_jint))                                         \
                                                                             \
   product(intx, SafepointTimeoutDelay, 10000,                               \
           "Delay in milliseconds for option SafepointTimeout")              \
+  LP64_ONLY(range(0, max_intx/MICROUNITS))                                  \
+  NOT_LP64(range(0, max_intx))                                              \
                                                                             \
   product(intx, NmethodSweepActivity, 10,                                   \
           "Removes cold nmethods from code cache if > 0. Higher values "    \
@@ -3222,6 +3230,7 @@
   product(intx, ProfileIntervalsTicks, 100,                                 \
           "Number of ticks between printing of interval profile "           \
           "(+ProfileIntervals)")                                            \
+          range(0, max_intx)                                                \
                                                                             \
   notproduct(intx, ScavengeALotInterval,     1,                             \
           "Interval between which scavenge will occur with +ScavengeALot")  \
@@ -3255,14 +3264,17 @@
   diagnostic(intx, MallocVerifyInterval,     0,                             \
           "If non-zero, verify C heap after every N calls to "              \
           "malloc/realloc/free")                                            \
+          range(0, max_intx)                                                \
                                                                             \
   diagnostic(intx, MallocVerifyStart,     0,                                \
           "If non-zero, start verifying C heap after Nth call to "          \
           "malloc/realloc/free")                                            \
+          range(0, max_intx)                                                \
                                                                             \
   diagnostic(uintx, MallocMaxTestWords,     0,                              \
           "If non-zero, maximum number of words that malloc/realloc can "   \
           "allocate (for testing only)")                                    \
+          range(0, max_uintx)                                               \
                                                                             \
   product(intx, TypeProfileWidth, 2,                                        \
           "Number of receiver types to record in call/cast profile")        \
@@ -3506,10 +3518,12 @@
   product(intx, DeferThrSuspendLoopCount,     4000,                         \
           "(Unstable) Number of times to iterate in safepoint loop "        \
           "before blocking VM threads ")                                    \
+          range(-1, max_jint-1)                                             \
                                                                             \
   product(intx, DeferPollingPageLoopCount,     -1,                          \
           "(Unsafe,Unstable) Number of iterations in safepoint loop "       \
           "before changing safepoint polling page to RO ")                  \
+          range(-1, max_jint-1)                                             \
                                                                             \
   product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)")               \
           range(0, max_intx)                                                \
@@ -3524,23 +3538,25 @@
   /* stack parameters */                                                    \
   product_pd(intx, StackYellowPages,                                        \
           "Number of yellow zone (recoverable overflows) pages")            \
-          range(1, max_intx)                                                \
+          range(MIN_STACK_YELLOW_PAGES, (DEFAULT_STACK_YELLOW_PAGES+5))     \
                                                                             \
   product_pd(intx, StackRedPages,                                           \
           "Number of red zone (unrecoverable overflows) pages")             \
-          range(1, max_intx)                                                \
+          range(MIN_STACK_RED_PAGES, (DEFAULT_STACK_RED_PAGES+2))           \
                                                                             \
   /* greater stack shadow pages can't generate instruction to bang stack */ \
   product_pd(intx, StackShadowPages,                                        \
           "Number of shadow zone (for overflow checking) pages "            \
           "this should exceed the depth of the VM and native call stack")   \
-          range(1, 50)                                                      \
+          range(MIN_STACK_SHADOW_PAGES, (DEFAULT_STACK_SHADOW_PAGES+30))    \
                                                                             \
   product_pd(intx, ThreadStackSize,                                         \
           "Thread Stack Size (in Kbytes)")                                  \
+          range(0, max_intx-os::vm_page_size())                             \
                                                                             \
   product_pd(intx, VMThreadStackSize,                                       \
           "Non-Java Thread Stack Size (in Kbytes)")                         \
+          range(0, max_intx/(1 * K))                                        \
                                                                             \
   product_pd(intx, CompilerThreadStackSize,                                 \
           "Compiler Thread Stack Size (in Kbytes)")                         \
@@ -3551,7 +3567,8 @@
                                                                             \
   /* code cache parameters                                    */            \
   /* ppc64/tiered compilation has large code-entry alignment. */            \
-  develop(uintx, CodeCacheSegmentSize, 64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)),\
+  develop(uintx, CodeCacheSegmentSize,                                      \
+          64 PPC64_ONLY(+64) NOT_PPC64(TIERED_ONLY(+64)),                   \
           "Code cache segment size (in bytes) - smallest unit of "          \
           "allocation")                                                     \
           range(1, 1024)                                                    \
@@ -3729,6 +3746,7 @@
   product(intx, VMThreadPriority, -1,                                       \
           "The native priority at which the VM thread should run "          \
           "(-1 means no change)")                                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(bool, CompilerThreadHintNoPreempt, true,                          \
           "(Solaris only) Give compiler threads an extra quanta")           \
@@ -3738,33 +3756,43 @@
                                                                             \
   product(intx, JavaPriority1_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority2_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority3_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority4_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority5_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority6_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority7_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority8_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority9_To_OSPriority, -1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   product(intx, JavaPriority10_To_OSPriority,-1,                            \
           "Map Java priorities to OS priorities")                           \
+          range(-1, 127)                                                    \
                                                                             \
   experimental(bool, UseCriticalJavaThreadPriority, false,                  \
           "Java thread priority 10 maps to critical scheduling priority")   \
@@ -3974,6 +4002,7 @@
                                                                             \
   product(size_t, MaxDirectMemorySize, 0,                                   \
           "Maximum total size of NIO direct-buffer allocations")            \
+          range(0, (size_t)SIZE_MAX)                                        \
                                                                             \
   /* Flags used for temporary code during development  */                   \
                                                                             \
@@ -4002,6 +4031,8 @@
                                                                             \
   product(intx, PerfDataSamplingInterval, 50,                               \
           "Data sampling interval (in milliseconds)")                       \
+          range(PeriodicTask::min_interval, max_jint)                       \
+          constraint(PerfDataSamplingIntervalFunc, AfterErgo)               \
                                                                             \
   develop(bool, PerfTraceDataCreation, false,                               \
           "Trace creation of Performance Data Entries")                     \
@@ -4015,9 +4046,11 @@
   product(intx, PerfDataMemorySize, 64*K,                                   \
           "Size of performance data memory region. Will be rounded "        \
           "up to a multiple of the native os page size.")                   \
+          range(128, 32*64*K)                                               \
                                                                             \
   product(intx, PerfMaxStringConstLength, 1024,                             \
           "Maximum PerfStringConstant string length before truncation")     \
+          range(32, 32*K)                                                   \
                                                                             \
   product(bool, PerfAllowAtExitRegistration, false,                         \
           "Allow registration of atexit() methods")                         \
@@ -4077,10 +4110,10 @@
           "If PrintSharedArchiveAndExit is true, also print the shared "    \
           "dictionary")                                                     \
                                                                             \
-  product(size_t, SharedReadWriteSize,  NOT_LP64(12*M) LP64_ONLY(16*M),     \
+  product(size_t, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(16*M),      \
           "Size of read-write space for metadata (in bytes)")               \
                                                                             \
-  product(size_t, SharedReadOnlySize,  NOT_LP64(12*M) LP64_ONLY(16*M),      \
+  product(size_t, SharedReadOnlySize, NOT_LP64(12*M) LP64_ONLY(16*M),       \
           "Size of read-only space for metadata (in bytes)")                \
                                                                             \
   product(uintx, SharedMiscDataSize,    NOT_LP64(2*M) LP64_ONLY(4*M),       \
@@ -4095,6 +4128,7 @@
                                                                             \
   product(uintx, SharedSymbolTableBucketSize, 4,                            \
           "Average number of symbols per bucket in shared table")           \
+          range(2, 246)                                                     \
                                                                             \
   diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false,              \
           "Do not quit -Xshare:dump even if we encounter unverifiable "     \
--- a/hotspot/src/share/vm/runtime/safepoint.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/safepoint.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -338,7 +338,7 @@
       tty->print_cr("# SafepointSynchronize: Finished after "
                     INT64_FORMAT_W(6) " ms",
                     ((current_time - safepoint_limit_time) / MICROUNITS +
-                     SafepointTimeoutDelay));
+                     (jlong)SafepointTimeoutDelay));
     }
   }
 #endif
@@ -1050,10 +1050,6 @@
 void SafepointSynchronize::deferred_initialize_stat() {
   if (init_done) return;
 
-  if (PrintSafepointStatisticsCount <= 0) {
-    fatal("Wrong PrintSafepointStatisticsCount");
-  }
-
   // If PrintSafepointStatisticsTimeout is specified, the statistics data will
   // be printed right away, in which case, _safepoint_stats will regress to
   // a single element array. Otherwise, it is a circular ring buffer with default
@@ -1164,7 +1160,7 @@
   // PrintSafepointStatisticsTimeout will be printed out right away.
   // By default, it is -1 meaning all samples will be put into the list.
   if ( PrintSafepointStatisticsTimeout > 0) {
-    if (spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+    if (spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) {
       print_statistics();
     }
   } else {
@@ -1230,7 +1226,7 @@
     os::javaTimeNanos() - cleanup_end_time;
 
   if ( PrintSafepointStatisticsTimeout < 0 ||
-       spstat->_time_to_sync > PrintSafepointStatisticsTimeout * MICROUNITS) {
+       spstat->_time_to_sync > (jlong)PrintSafepointStatisticsTimeout * MICROUNITS) {
     print_statistics();
   }
   tty->cr();
--- a/hotspot/src/share/vm/runtime/thread.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/thread.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -2222,7 +2222,7 @@
         tty->print_cr("Pending Async. exception installed of type: %s", InstanceKlass::cast(_pending_async_exception->klass())->external_name());
       }
       // for AbortVMOnException flag
-      NOT_PRODUCT(Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name()));
+      Exceptions::debug_check_abort(InstanceKlass::cast(_pending_async_exception->klass())->external_name());
     }
   }
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -55,7 +55,6 @@
 #include "gc/shared/generation.hpp"
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/space.hpp"
-#include "gc/shared/watermark.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
@@ -405,7 +404,6 @@
   nonstatic_field(Method,                      _method_counters,                              MethodCounters*)                       \
   nonstatic_field(Method,                      _access_flags,                                 AccessFlags)                           \
   nonstatic_field(Method,                      _vtable_index,                                 int)                                   \
-  nonstatic_field(Method,                      _method_size,                                  u2)                                    \
   nonstatic_field(Method,                      _intrinsic_id,                                 u2)                                    \
   nonstatic_field(Method,                      _flags,                                        u1)                                    \
   nonproduct_nonstatic_field(Method,           _compiled_invocation_count,                    int)                                   \
@@ -535,7 +533,7 @@
                                                                                                                                      \
   nonstatic_field(BlockOffsetArrayNonContigSpace, _unallocated_block,                         HeapWord*)                             \
                                                                                                                                      \
-  nonstatic_field(CardGeneration,              _rs,                                           GenRemSet*)                            \
+  nonstatic_field(CardGeneration,              _rs,                                           CardTableRS*)                          \
   nonstatic_field(CardGeneration,              _bts,                                          BlockOffsetSharedArray*)               \
   nonstatic_field(CardGeneration,              _shrink_factor,                                size_t)                                \
   nonstatic_field(CardGeneration,              _capacity_at_prologue,                         size_t)                                \
@@ -625,8 +623,6 @@
   nonstatic_field(VirtualSpace,                _lower_high,                                   char*)                                 \
   nonstatic_field(VirtualSpace,                _middle_high,                                  char*)                                 \
   nonstatic_field(VirtualSpace,                _upper_high,                                   char*)                                 \
-  nonstatic_field(WaterMark,                   _point,                                        HeapWord*)                             \
-  nonstatic_field(WaterMark,                   _space,                                        Space*)                                \
                                                                                                                                      \
   /************************/                                                                                                         \
   /* PerfMemory - jvmstat */                                                                                                         \
@@ -1609,8 +1605,7 @@
            declare_type(CardTableModRefBS,            ModRefBarrierSet)   \
            declare_type(CardTableModRefBSForCTRS,     CardTableModRefBS)  \
   declare_toplevel_type(BarrierSet::Name)                                 \
-  declare_toplevel_type(GenRemSet)                                        \
-           declare_type(CardTableRS,                  GenRemSet)          \
+  declare_toplevel_type(CardTableRS)                                      \
   declare_toplevel_type(BlockOffsetSharedArray)                           \
   declare_toplevel_type(BlockOffsetTable)                                 \
            declare_type(BlockOffsetArray,             BlockOffsetTable)   \
@@ -1626,7 +1621,6 @@
   declare_toplevel_type(MemRegion)                                        \
   declare_toplevel_type(ThreadLocalAllocBuffer)                           \
   declare_toplevel_type(VirtualSpace)                                     \
-  declare_toplevel_type(WaterMark)                                        \
   declare_toplevel_type(ObjPtrQueue)                                      \
   declare_toplevel_type(DirtyCardQueue)                                   \
                                                                           \
@@ -1634,7 +1628,6 @@
                                                                           \
   declare_toplevel_type(BarrierSet*)                                      \
   declare_toplevel_type(BlockOffsetSharedArray*)                          \
-  declare_toplevel_type(GenRemSet*)                                       \
   declare_toplevel_type(CardTableRS*)                                     \
   declare_toplevel_type(CardTableModRefBS*)                               \
   declare_toplevel_type(CardTableModRefBS**)                              \
--- a/hotspot/src/share/vm/runtime/vmThread.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/runtime/vmThread.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -419,7 +419,7 @@
 
         // Support for self destruction
         if ((SelfDestructTimer != 0) && !is_error_reported() &&
-            (os::elapsedTime() > SelfDestructTimer * 60)) {
+            (os::elapsedTime() > (double)SelfDestructTimer * 60.0)) {
           tty->print_cr("VM self-destructed");
           exit(-1);
         }
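The casts in the safepoint.cpp hunk above ((jlong)SafepointTimeoutDelay, (jlong)PrintSafepointStatisticsTimeout) and the (double)SelfDestructTimer cast here share one pattern: widen the flag value before the multiplication or comparison so the arithmetic runs in the wider type instead of overflowing 32-bit int first. A self-contained Java illustration of the failure mode (the values are made up for the demo):

    public class WideningDemo {
        public static void main(String[] args) {
            int timeoutMs = 4000;                        // a plausible flag value
            long wrong = timeoutMs * 1_000_000;          // multiply happens in int and overflows
            long right = (long) timeoutMs * 1_000_000L;  // widen first, then multiply
            System.out.println(wrong + " vs " + right);  // -294967296 vs 4000000000
        }
    }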
--- a/hotspot/src/share/vm/utilities/exceptions.cpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp	Thu Oct 22 16:25:34 2015 -0700
@@ -145,7 +145,7 @@
                   p2i(h_exception()), file, line, p2i(thread));
   }
   // for AbortVMOnException flag
-  NOT_PRODUCT(Exceptions::debug_check_abort(h_exception, message));
+  Exceptions::debug_check_abort(h_exception, message);
 
   // Check for special boot-strapping/vm-thread handling
   if (special_exception(thread, file, line, h_exception)) {
@@ -477,13 +477,12 @@
 
 // ----------------------------------------------------------------------------------------
 
-#ifndef PRODUCT
 // caller frees value_string if necessary
 void Exceptions::debug_check_abort(const char *value_string, const char* message) {
   if (AbortVMOnException != NULL && value_string != NULL &&
       strstr(value_string, AbortVMOnException)) {
-    if (AbortVMOnExceptionMessage == NULL || message == NULL ||
-        strcmp(message, AbortVMOnExceptionMessage) == 0) {
+    if (AbortVMOnExceptionMessage == NULL || (message != NULL &&
+        strstr(message, AbortVMOnExceptionMessage))) {
       fatal("Saw %s, aborting", value_string);
     }
   }
@@ -491,14 +490,17 @@
 
 void Exceptions::debug_check_abort(Handle exception, const char* message) {
   if (AbortVMOnException != NULL) {
-    ResourceMark rm;
-    if (message == NULL && exception->is_a(SystemDictionary::Throwable_klass())) {
-      oop msg = java_lang_Throwable::message(exception);
-      if (msg != NULL) {
-        message = java_lang_String::as_utf8_string(msg);
-      }
-    }
-    debug_check_abort(InstanceKlass::cast(exception()->klass())->external_name(), message);
+    debug_check_abort_helper(exception, message);
   }
 }
-#endif
+
+void Exceptions::debug_check_abort_helper(Handle exception, const char* message) {
+  ResourceMark rm;
+  if (message == NULL && exception->is_a(SystemDictionary::Throwable_klass())) {
+    oop msg = java_lang_Throwable::message(exception);
+    if (msg != NULL) {
+      message = java_lang_String::as_utf8_string(msg);
+    }
+  }
+  debug_check_abort(InstanceKlass::cast(exception()->klass())->external_name(), message);
+}
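Two behavioral changes land in debug_check_abort: it is now compiled into product builds (the NOT_PRODUCT wrappers and the #ifndef PRODUCT guard are gone), and AbortVMOnExceptionMessage is matched as a substring (strstr) instead of an exact string (strcmp). The resulting rule, transliterated to Java as a sketch with contains() playing the role of strstr (not the VM's code):

    static boolean shouldAbort(String exceptionName, String message,
                               String abortVMOnException, String abortVMOnExceptionMessage) {
        // No abort unless the exception name mentions the configured value.
        if (abortVMOnException == null || exceptionName == null
                || !exceptionName.contains(abortVMOnException)) {
            return false;
        }
        // The message check is now a substring match, not an exact match.
        return abortVMOnExceptionMessage == null
                || (message != null && message.contains(abortVMOnExceptionMessage));
    }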
--- a/hotspot/src/share/vm/utilities/exceptions.hpp	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/src/share/vm/utilities/exceptions.hpp	Thu Oct 22 16:25:34 2015 -0700
@@ -174,8 +174,9 @@
   static void print_exception_counts_on_error(outputStream* st);
 
   // for AbortVMOnException flag
-  NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
-  NOT_PRODUCT(static void debug_check_abort(const char *value_string, const char* message = NULL);)
+  static void debug_check_abort(Handle exception, const char* message = NULL);
+  static void debug_check_abort_helper(Handle exception, const char* message = NULL);
+  static void debug_check_abort(const char *value_string, const char* message = NULL);
 };
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/cms/TestCMSScavengeBeforeRemark.java	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestCMSScavengeBeforeRemark
+ * @key gc
+ * @bug 8139868
+ * @requires vm.gc=="ConcMarkSweep" | vm.gc=="null"
+ * @summary Run CMS with CMSScavengeBeforeRemark
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+CMSScavengeBeforeRemark -XX:+ExplicitGCInvokesConcurrent -Xmx256m -XX:+PrintGCDetails TestCMSScavengeBeforeRemark
+ */
+
+public class TestCMSScavengeBeforeRemark {
+    public static void main(String args[]) throws Exception {
+        System.gc();
+    }
+}
--- a/hotspot/test/gc/g1/TestGCLogMessages.java	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/test/gc/g1/TestGCLogMessages.java	Thu Oct 22 16:25:34 2015 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test TestGCLogMessages
- * @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962
+ * @bug 8035406 8027295 8035398 8019342 8027959 8048179 8027962 8069330
  * @summary Ensure that the PrintGCDetails output for a minor GC with G1
  * includes the expected necessary messages.
  * @key gc
@@ -55,6 +55,8 @@
     };
 
     private LogMessageWithLevel allLogMessages[] = new LogMessageWithLevel[] {
+        // Update RS
+        new LogMessageWithLevel("Scan HCC (ms)", Level.FINER),
         // Ext Root Scan
         new LogMessageWithLevel("Thread Roots (ms)", Level.FINEST),
         new LogMessageWithLevel("StringTable Roots (ms)", Level.FINEST),
--- a/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/test/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Thu Oct 22 16:25:34 2015 -0700
@@ -52,6 +52,18 @@
         allOptionsAsMap.remove("CICompilerCount");
 
         /*
+         * JDK-8136766
+         * Temporarily remove ThreadStackSize from testing because Windows can set it to 0
+         * (for default OS size) but other platforms insist it must be greater than 0
+         */
+        allOptionsAsMap.remove("ThreadStackSize");
+
+        /*
+         * Exclude MallocMaxTestWords as it is expected to exit VM at small values (>=0)
+         */
+        allOptionsAsMap.remove("MallocMaxTestWords");
+
+        /*
          * Exclude below options as their maximum value would consume too much memory
          * and would affect other tests that run in parallel.
          */
--- a/hotspot/test/runtime/CommandLine/VMOptionsFile/TestVMOptionsFile.java	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/test/runtime/CommandLine/VMOptionsFile/TestVMOptionsFile.java	Thu Oct 22 16:25:34 2015 -0700
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 8061999
+ * @bug 8061999 8135195 8136552
  * @summary Test "-XX:VMOptionsFile" VM option
  * @library /testlibrary
  * @modules jdk.management
@@ -478,6 +478,7 @@
         runJavaCheckExitValue(pb, JVM_SUCCESS);
 
         outputShouldContain("interpreted mode");
+        outputShouldNotContain("VM option '+PrintVMOptions'");
         checkProperty("shared.property", "command_line_after");
         checkVMOption("MinHeapFreeRatio", "9");
 
@@ -547,13 +548,13 @@
         addVMOptionsFile(VM_OPTION_FILE_WITH_SAME_VM_OPTION_FILE);
 
         runJavaCheckExitValue(JVM_FAIL_WITH_EXIT_CODE_1);
-        outputShouldContain("VM options file is only supported on the command line");
+        outputShouldContain("The VM Options file can only be specified once and only on the command line.");
 
         /* Pass VM option file with VM option file option in it */
         addVMOptionsFile(VM_OPTION_FILE_WITH_VM_OPTION_FILE);
 
         runJavaCheckExitValue(JVM_FAIL_WITH_EXIT_CODE_1);
-        outputShouldContain("VM options file is only supported on the command line");
+        outputShouldContain("The VM Options file can only be specified once and only on the command line.");
 
         /* Pass VM option file which is not accessible (without read permissions) */
         addVMOptionsFile(getAbsolutePathFromSource(VM_OPTION_FILE_WITHOUT_READ_PERMISSIONS));
@@ -566,7 +567,7 @@
         addVMOptionsFile(VM_OPTION_FILE_2);
 
         runJavaCheckExitValue(JVM_FAIL_WITH_EXIT_CODE_1);
-        outputShouldContain("Only one VM Options file is supported on the command line");
+        outputShouldContain("The VM Options file can only be specified once and only on the command line.");
 
         /* Pass empty option file i.e. pass "-XX:VMOptionsFile=" */
         addVMOptionsFile("");
@@ -585,6 +586,22 @@
 
         runJavaCheckExitValue(JVM_FAIL_WITH_EXIT_CODE_1);
         outputShouldContain("Unmatched quote in");
+
+        /* Pass VM Option file in _JAVA_OPTIONS environment variable */
+        pb = createProcessBuilder();
+
+        updateEnvironment(pb, JAVA_OPTIONS, "-XX:VMOptionsFile=" + getAbsolutePathFromSource(VM_OPTION_FILE_1));
+
+        runJavaCheckExitValue(pb, JVM_FAIL_WITH_EXIT_CODE_1);
+        outputShouldContain("VM options file is only supported on the command line");
+
+        /* Pass VM Option file in JAVA_TOOL_OPTIONS environment variable */
+        pb = createProcessBuilder();
+
+        updateEnvironment(pb, JAVA_TOOL_OPTIONS, "-XX:VMOptionsFile=" + getAbsolutePathFromSource(VM_OPTION_FILE_1));
+
+        runJavaCheckExitValue(pb, JVM_FAIL_WITH_EXIT_CODE_1);
+        outputShouldContain("VM options file is only supported on the command line");
     }
 
     public static void main(String[] args) throws Exception {
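The two added cases pin down that -XX:VMOptionsFile is refused when it arrives via the _JAVA_OPTIONS or JAVA_TOOL_OPTIONS environment variables rather than the command line. updateEnvironment is the test's own helper whose body lies outside this hunk; a plausible minimal sketch of such a helper (the option-file path in the usage line is hypothetical):

    static void updateEnvironment(ProcessBuilder pb, String name, String value) {
        pb.environment().put(name, value);  // inherited environment plus the override
    }
    // usage: updateEnvironment(pb, "_JAVA_OPTIONS", "-XX:VMOptionsFile=opts.txt");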
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/classFileParserBug/InitInInterface.java	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8139069
+ * @summary Check that any method named <init> in an interface causes ClassFormatError
+ * @compile nonvoidinit.jasm voidinit.jasm
+ * @run main InitInInterface
+ */
+
+// Test that an <init> method is not allowed in interfaces.
+public class InitInInterface {
+    public static void main(String args[]) throws Throwable {
+
+        System.out.println("Regression test for bug 8130183");
+        try {
+            Class newClass = Class.forName("nonvoidinit");
+            throw new RuntimeException(
+                 "ClassFormatError not thrown for non-void <init> in an interface");
+        } catch (java.lang.ClassFormatError e) {
+            if (!e.getMessage().contains("Interface cannot have a method named <init>")) {
+                throw new RuntimeException("Unexpected exception nonvoidint: " + e.getMessage());
+            }
+        }
+        try {
+            Class newClass = Class.forName("voidinit");
+            throw new RuntimeException(
+                 "ClassFormatError not thrown for void <init> in an interface");
+        } catch (java.lang.ClassFormatError e) {
+            if (!e.getMessage().contains("Interface cannot have a method named <init>")) {
+                throw new RuntimeException("Unexpected exception voidint: " + e.getMessage());
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/classFileParserBug/nonvoidinit.jasm	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Interface containing non-void <init> method.
+public interface nonvoidinit version 50:0
+{
+
+    public abstract Method "<init>":"()I";
+
+} // end Class nonvoidinit
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/classFileParserBug/voidinit.jasm	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+// Interface containing void <init> method.
+public interface voidinit version 50:0
+{
+
+    public abstract Method "<init>":"()V";
+
+} // end Class voidinit
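The quoted strings in these jasm files are standard JVM method descriptors: "()I" declares a no-argument method returning int, "()V" the void variant, which is how the pair covers both branches of the new ClassFormatError check. For reference, java.lang.invoke.MethodType prints the same descriptors:

    import java.lang.invoke.MethodType;

    public class DescriptorDemo {
        public static void main(String[] args) {
            System.out.println(MethodType.methodType(int.class).toMethodDescriptorString());  // ()I
            System.out.println(MethodType.methodType(void.class).toMethodDescriptorString()); // ()V
        }
    }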
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/serviceability/dcmd/gc/FinalizationRunner.java	Thu Oct 22 16:25:34 2015 -0700
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.concurrent.CountDownLatch;
+
+import jdk.test.lib.dcmd.CommandExecutor;
+import jdk.test.lib.dcmd.JMXExecutor;
+
+public class FinalizationRunner {
+    public static final String FAILED = "Failed";
+    public static final String PASSED = "Passed";
+
+    static volatile boolean wasFinalized = false;
+    private static final CountDownLatch finRunLatch = new CountDownLatch(1);
+    private static final CountDownLatch finBlockLatch = new CountDownLatch(1);
+
+    static class MyObject {
+        @Override
+        protected void finalize() {
+            if (Thread.currentThread().getName().equals("Finalizer")) {
+                try {
+                    System.out.println("inside the regular finalizer thread; blocking");
+                    // 'regular' finalizer thread is ready to be effectively blocked -
+                    //    we can continue with the GC.run_finalization test
+                    finRunLatch.countDown();
+                    // prevent the 'regular' finalizer from finalizing this instance
+                    // until the GC.run_finalization has had its chance to do so
+                    finBlockLatch.await();
+                } catch (InterruptedException e) {
+                }
+            } else {
+                if (Thread.currentThread().getName().equals("Secondary finalizer")) {
+                    System.out.println("finalizing the test instance");
+                    // finalizing on behalf of GC.run_finalization -
+                    //   unblock the 'regular' finalizer and the test main method
+                    wasFinalized = true;
+                } else {
+                    fail("Unexpected finalizer thread name: " +
+                            Thread.currentThread().getName());
+                }
+                finBlockLatch.countDown();
+            }
+        }
+    }
+
+    // this instance will be used to provoke the regular finalization
+    // so the finalizer thread can be blocked for the duration of
+    // GC.run_finalization test
+    public static MyObject o1;
+
+    // this instance will be used to perform the GC.run_finalization test
+    public static MyObject o2;
+
+    private static void run(CommandExecutor executor) {
+        o2 = new MyObject();
+        o2 = null;
+        System.out.println("running GC.run_finalization");
+        System.gc();
+        executor.execute("GC.run_finalization");
+
+        System.out.println("Waiting for finalization");
+
+        try {
+            finBlockLatch.await();
+            if (wasFinalized) {
+                System.out.println(PASSED + ": Object was finalized");
+            } else {
+                fail("Object was not finalized");
+            }
+        } catch (InterruptedException e) {
+            fail("Interrupted while waiting for finalization", e);
+        }
+    }
+
+    public static void main(String ... args) {
+        System.out.println("\n=== FinalizationRunner");
+        try {
+            blockFinalizerThread();
+
+            Runtime.getRuntime().addShutdownHook(new Thread(()->{
+                run(new JMXExecutor());
+            }));
+        } catch (InterruptedException e) {
+            fail("Interrupted while trying to block the finalizer thread", e);
+        }
+    }
+
+    private static void blockFinalizerThread() throws InterruptedException {
+        System.out.println("trying to block the finalizer thread");
+        o1 = new MyObject();
+        o1 = null;
+        System.gc();
+        finRunLatch.await();
+    }
+
+    private static void fail(String msg, Exception e) {
+        fail(msg);
+        e.printStackTrace(System.err);
+    }
+
+    private static void fail(String msg) {
+        System.err.println(FAILED + ": " + msg);
+    }
+}
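The handshake above relies on two one-shot CountDownLatch instances: finRunLatch parks the test until the "Finalizer" thread is provably stuck in MyObject.finalize(), and finBlockLatch then holds that thread until GC.run_finalization has finalized o2 on the "Secondary finalizer" thread. The idiom in isolation, as a minimal runnable sketch:

    import java.util.concurrent.CountDownLatch;

    public class LatchHandshake {
        public static void main(String[] args) throws InterruptedException {
            CountDownLatch done = new CountDownLatch(1);
            new Thread(() -> {
                System.out.println("worker running");
                done.countDown();   // release any thread parked in await()
            }).start();
            done.await();           // blocks until countDown() fires
            System.out.println("worker signalled completion");
        }
    }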
--- a/hotspot/test/serviceability/dcmd/gc/RunFinalizationTest.java	Thu Oct 22 11:13:08 2015 -0700
+++ b/hotspot/test/serviceability/dcmd/gc/RunFinalizationTest.java	Thu Oct 22 16:25:34 2015 -0700
@@ -21,13 +21,11 @@
  * questions.
  */
 
-import java.util.concurrent.Phaser;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.util.ArrayList;
+import java.util.List;
 
-import jdk.test.lib.dcmd.CommandExecutor;
-import jdk.test.lib.dcmd.JMXExecutor;
-import jdk.test.lib.Utils;
+import jdk.test.lib.OutputAnalyzer;
+import jdk.test.lib.ProcessTools;
 
 /*
  * @test
@@ -39,71 +37,21 @@
  *          jdk.jvmstat/sun.jvmstat.monitor
  * @build jdk.test.lib.*
  * @build jdk.test.lib.dcmd.*
- * @run main/othervm RunFinalizationTest
+ * @build RunFinalizationTest FinalizationRunner
+ * @run main RunFinalizationTest
  */
 public class RunFinalizationTest {
-    private static final long TIMEOUT = Utils.adjustTimeout(15000); // 15s
-    private static final Phaser ph = new Phaser(3);
-    static volatile boolean wasFinalized = false;
-    static volatile boolean wasInitialized = false;
-
-    static class MyObject {
-        public MyObject() {
-            /* Make sure object allocation/deallocation is not optimized out */
-            wasInitialized = true;
-        }
-
-        protected void finalize() {
-            if (!Thread.currentThread().getName().equals("Finalizer")) {
-                wasFinalized = true;
-                ph.arrive();
-            } else {
-                ph.arriveAndAwaitAdvance();
-            }
-        }
-    }
-
-    public static MyObject o;
-
-    private static void run(CommandExecutor executor) {
-        o = new MyObject();
-        o = null;
-        System.gc();
-        executor.execute("GC.run_finalization");
-
-        System.out.println("Waiting for signal from finalizer");
+    private final static String TEST_APP_NAME = "FinalizationRunner";
 
-        long targetTime = System.currentTimeMillis() + TIMEOUT;
-        while (System.currentTimeMillis() < targetTime) {
-            try {
-                ph.awaitAdvanceInterruptibly(ph.arrive(), 200, TimeUnit.MILLISECONDS);
-                System.out.println("Received signal");
-                break;
-            } catch (InterruptedException e) {
-                fail("Test error: Interrupted while waiting for signal from finalizer", e);
-            } catch (TimeoutException e) {
-                System.out.println("Haven't received signal in 200ms. Retrying ...");
-            }
-        }
+    public static void main(String ... args) throws Exception {
+        List<String> javaArgs = new ArrayList<>();
+        javaArgs.add("-cp");
+        javaArgs.add(System.getProperty("test.class.path"));
+        javaArgs.add(TEST_APP_NAME);
+        ProcessBuilder testAppPb = ProcessTools.createJavaProcessBuilder(javaArgs.toArray(new String[javaArgs.size()]));
 
-        if (!wasFinalized) {
-            fail("Test failure: Object was not finalized");
-        }
-    }
-
-    public static void main(String ... args) {
-        MyObject o = new MyObject();
-        o = null;
-        Runtime.getRuntime().addShutdownHook(new Thread(()->{
-            run(new JMXExecutor());
-        }));
-    }
-
-    private static void fail(String msg, Exception e) {
-        throw new Error(msg, e);
-    }
-
-    private static void fail(String msg) {
-        throw new Error(msg);
+        OutputAnalyzer out = ProcessTools.executeProcess(testAppPb);
+        out.stderrShouldNotMatch("^" + FinalizationRunner.FAILED + ".*")
+           .stdoutShouldMatch("^" + FinalizationRunner.PASSED + ".*");
     }
 }