Merge
author: jwilhelm
date: Mon, 24 Nov 2014 23:28:48 +0100
changeset: 27884 563a811ad398
parent: 27693 3eee985a97e7 (diff)
parent: 27647 4dcb647196fd (current diff)
child: 27885 7786b3940066
files:
hotspot/test/gc/g1/TestHumongousShrinkHeap.java
hotspot/test/gc/g1/TestShrinkAuxiliaryData.java
--- a/hotspot/make/linux/makefiles/gcc.make	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/make/linux/makefiles/gcc.make	Mon Nov 24 23:28:48 2014 +0100
@@ -214,7 +214,7 @@
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wreturn-type
 
 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
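The new -Wreturn-type flag is what several "// Mute compiler" returns later in this changeset exist to satisfy: GCC diagnoses any path on which control can fall off the end of a non-void function, including paths that end in a call the compiler cannot prove is noreturn (such as Unimplemented() or ShouldNotReachHere()). A minimal sketch of the diagnosed pattern, using hypothetical code rather than anything from this patch:

// g++ -Wreturn-type (fatal under WARNINGS_ARE_ERRORS) rejects this function
// if the final return is missing: "control reaches end of non-void function".
static int sign(int x) {
  if (x > 0) return 1;
  if (x < 0) return -1;
  return 0; // the explicit return that mutes the warning, cf. x86_32.ad below
}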
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -27,6 +27,7 @@
 #define CPU_PPC_VM_MACROASSEMBLER_PPC_HPP
 
 #include "asm/assembler.hpp"
+#include "utilities/macros.hpp"
 
 // MacroAssembler extends Assembler by a few frequently used macros.
 
--- a/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -4813,6 +4813,7 @@
     StubRoutines::_atomic_add_entry          = generate_atomic_add();
     StubRoutines::_atomic_xchg_ptr_entry     = StubRoutines::_atomic_xchg_entry;
     StubRoutines::_atomic_cmpxchg_ptr_entry  = StubRoutines::_atomic_cmpxchg_entry;
+    StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_ptr_entry      = StubRoutines::_atomic_add_entry;
 #endif  // COMPILER2 !=> _LP64
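SPARC gets its byte CAS from the generic Atomic::cmpxchg path rather than from a generated stub, so the new table slot is wired to ShouldNotCallThisStub(), a fail-fast placeholder. A hedged sketch of the idea, not hotspot's actual helper:

// Hypothetical simplification: a placeholder entry that stops the VM if any
// caller ever dispatches through the unimplemented slot.
static jbyte atomic_cmpxchg_byte_unimplemented(jbyte, volatile jbyte*, jbyte) {
  fatal("atomic_cmpxchg_byte stub should never be called on this platform");
  return 0; // unreachable, but keeps -Wreturn-type quiet
}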
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -1297,6 +1297,17 @@
   emit_operand(reg, adr);
 }
 
+// The 8-bit cmpxchg compares the value at adr with the contents of rax;
+// if they are equal, reg is stored into adr, otherwise the value at adr is
+// loaded into rax. The ZF is set if the compared values were equal, and cleared otherwise.
+void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
+  InstructionMark im(this);
+  prefix(adr, reg, true);
+  emit_int8(0x0F);
+  emit_int8((unsigned char)0xB0);
+  emit_operand(reg, adr);
+}
+
 void Assembler::comisd(XMMRegister dst, Address src) {
   // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely ucomisd comes out correct
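For reference, 0x0F 0xB0 is the two-byte opcode of CMPXCHG r/m8, r8 (the existing cmpxchgl emits 0x0F 0xB1 for the 32-bit form), and the boolean passed to prefix() requests a REX prefix when one is needed to address a byte register such as sil or dil. A hypothetical usage sketch, not from this patch, showing how code generation would drive the new method:

// Names here are assumed for illustration. CMPXCHG implicitly compares
// against rax, so the caller must load the expected value there first.
void emit_byte_cas(MacroAssembler* masm, Register new_value, Register base) {
  if (os::is_MP()) masm->lock();               // lock prefix only on MP systems
  masm->cmpxchgb(new_value, Address(base, 0)); // emits 0x0F 0xB0 /r
}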
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -1006,6 +1006,7 @@
 
   void cmpxchg8 (Address adr);
 
+  void cmpxchgb(Register reg, Address adr);
   void cmpxchgl(Register reg, Address adr);
 
   void cmpxchgq(Register reg, Address adr);
--- a/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -594,9 +594,35 @@
     return start;
   }
 
-  // Support for jint atomic::atomic_cmpxchg_long(jlong exchange_value,
-  //                                             volatile jlong* dest,
-  //                                             jlong compare_value)
+  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
+  //                                          jbyte compare_value)
+  //
+  // Arguments :
+  //    c_rarg0: exchange_value
+  //    c_rarg1: dest
+  //    c_rarg2: compare_value
+  //
+  // Result:
+  //    if (compare_value == *dest) {
+  //       *dest = exchange_value;
+  //       return compare_value;
+  //    } else {
+  //       return *dest;
+  //    }
+  address generate_atomic_cmpxchg_byte() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_byte");
+    address start = __ pc();
+
+    __ movsbq(rax, c_rarg2);
+    if (os::is_MP()) __ lock();
+    __ cmpxchgb(c_rarg0, Address(c_rarg1, 0));
+    __ ret(0);
+
+    return start;
+  }
+
+  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
+  //                                          volatile jlong* dest,
+  //                                          jlong compare_value)
   // Arguments :
   //    c_rarg0: exchange_value
   //    c_rarg1: dest
@@ -3894,6 +3920,7 @@
     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
     StubRoutines::_atomic_xchg_ptr_entry     = generate_atomic_xchg_ptr();
     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
+    StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_entry          = generate_atomic_add();
     StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
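In the byte stub above, movsbq(rax, c_rarg2) sign-extends compare_value into rax because CMPXCHG compares against the accumulator implicitly; after the instruction al holds the value that was found in memory, which is exactly the documented return value, so a bare ret(0) suffices. A hedged sketch of how VM code would use the new jbyte overload (the flag and function names are made up for illustration):

// Hypothetical illustration, not from this patch: claiming a one-byte flag.
static volatile jbyte _claimed = 0;

static bool try_claim() {
  // Atomically: if (_claimed == 0) _claimed = 1. cmpxchg returns the old
  // value, so the claim succeeded iff that value was still 0.
  return Atomic::cmpxchg((jbyte)1, &_claimed, (jbyte)0) == 0;
}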
--- a/hotspot/src/cpu/x86/vm/x86_32.ad	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad	Mon Nov 24 23:28:48 2014 +0100
@@ -1210,6 +1210,7 @@
 
 
   Unimplemented();
+  return 0; // Mute compiler
 }
 
 #ifndef PRODUCT
--- a/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/cpu/zero/vm/stubGenerator_zero.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -207,6 +207,7 @@
     StubRoutines::_atomic_xchg_ptr_entry     = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_entry      = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_ptr_entry  = ShouldNotCallThisStub();
+    StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_ptr_entry      = ShouldNotCallThisStub();
--- a/hotspot/src/os/aix/vm/perfMemory_aix.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os/aix/vm/perfMemory_aix.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -422,7 +422,7 @@
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
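This CHECK_NULL to THREAD change, repeated in several files below, fixes one recurring anti-pattern: the CHECK_* macros expand into an exception test placed after the call, and when the call sits in a return statement that test is unreachable, which the newly enabled warnings flag. A simplified sketch of the expansion; the stand-ins below are assumed, the real macros live in utilities/exceptions.hpp:

#include <cstddef>

struct Thread { bool pending; };
static bool has_pending_exception(Thread* t) { return t->pending; }
static char* get_user_name_slow(int, Thread*) { return NULL; }

#define CHECK_NULL __the_thread__); \
  if (has_pending_exception(__the_thread__)) return NULL; (void)(0

static char* get_user_name(int vmid, Thread* __the_thread__) {
  // Expands to:
  //   return get_user_name_slow(vmid, __the_thread__);
  //   if (has_pending_exception(__the_thread__)) return NULL;  // unreachable
  // Passing THREAD directly drops the dead check and leaves any pending
  // exception for the caller to deal with.
  return get_user_name_slow(vmid, CHECK_NULL);
}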
--- a/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os/bsd/vm/perfMemory_bsd.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -422,7 +422,7 @@
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -68,6 +68,7 @@
 #include "utilities/events.hpp"
 #include "utilities/elfFile.hpp"
 #include "utilities/growableArray.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
 // put OS-includes here
--- a/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os/linux/vm/perfMemory_linux.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -422,7 +422,7 @@
 // return the name of the user that owns the JVM indicated by the given vmid.
 //
 static char* get_user_name(int vmid, TRAPS) {
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
--- a/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os/solaris/vm/perfMemory_solaris.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -461,7 +461,7 @@
   // since the structured procfs and old procfs interfaces can't be
   // mixed, we attempt to find the file through a directory search.
 
-  return get_user_name_slow(vmid, CHECK_NULL);
+  return get_user_name_slow(vmid, THREAD);
 }
 
 // return the file name of the backing store file for the named
--- a/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -88,6 +88,15 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   int mp = os::is_MP();
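The constraints mirror the existing jint version: "=a" returns the result in al, "a" preloads compare_value into the accumulator that CMPXCHG implicitly compares against, "q" forces exchange_value into a byte-addressable register, and LOCK_IF_MP(%4) branches around the lock prefix on uniprocessors. A standalone sketch of the same pattern that always emits the lock prefix (hypothetical, GCC/Clang on x86 only):

static inline char cmpxchg_byte(char exchange_value, volatile char* dest,
                                char compare_value) {
  __asm__ volatile ("lock; cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)   // old value comes back in al
                    : "q"  (exchange_value),  // %1: byte-addressable register
                      "a"  (compare_value),   // implicit comparand in al
                      "r"  (dest)             // %3: target address
                    : "cc", "memory");
  return exchange_value;
}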
--- a/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/atomic_linux_x86.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -88,6 +88,15 @@
   return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  int mp = os::is_MP();
+  __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+  return exchange_value;
+}
 
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   int mp = os::is_MP();
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -542,6 +542,7 @@
   err.report_and_die();
 
   ShouldNotReachHere();
+  return true; // Mute compiler
 }
 
 void os::Linux::init_thread_fpu_state(void) {
--- a/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -68,6 +68,8 @@
 extern "C" {
   jint _Atomic_add(jint add_value, volatile jint* dest IS_MP_DECL());
   jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
+  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
+                             jbyte compare_value IS_MP_DECL());
   jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
                        jint compare_value IS_MP_DECL());
   jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
@@ -82,6 +84,11 @@
   return _Atomic_xchg(exchange_value, dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  return _Atomic_cmpxchg_byte(exchange_value, dest, compare_value IS_MP_ARG());
+}
+
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   return _Atomic_cmpxchg(exchange_value, dest, compare_value IS_MP_ARG());
 }
@@ -217,6 +224,15 @@
     return exchange_value;
   }
 
+
+  inline jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, int mp) {
+    __asm__ volatile (LOCK_IF_MP(%4) "cmpxchgb %1,(%3)"
+                    : "=a" (exchange_value)
+                    : "q" (exchange_value), "a" (compare_value), "r" (dest), "r" (mp)
+                    : "cc", "memory");
+    return exchange_value;
+  }
+
   // This is the interface to the atomic instruction in solaris_i486.s.
   jlong _Atomic_cmpxchg_long_gcc(jlong exchange_value, volatile jlong* dest, jlong compare_value, int mp);
 
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_32.il	Mon Nov 24 23:28:48 2014 +0100
@@ -76,6 +76,23 @@
       xchgl    (%ecx), %eax
       .end
 
+  // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, 
+  //                                   volatile jbyte *dest, 
+  //                                   jbyte compare_value)
+  // An additional bool (os::is_MP()) is passed as the last argument.
+      .inline _Atomic_cmpxchg_byte,4
+      movb     8(%esp), %al   // compare_value
+      movb     0(%esp), %cl   // exchange_value
+      movl     4(%esp), %edx   // dest
+      cmp      $0, 12(%esp)    // MP test
+      jne      1f
+      cmpxchgb %cl, (%edx)
+      jmp      2f
+1:    lock
+      cmpxchgb %cl, (%edx)
+2:
+      .end
+
   // Support for jint Atomic::cmpxchg(jint exchange_value, 
   //                                  volatile jint *dest, 
   //                                  jint compare_value)
--- a/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/solaris_x86/vm/solaris_x86_64.il	Mon Nov 24 23:28:48 2014 +0100
@@ -77,6 +77,15 @@
       movq     %rdi, %rax
       .end
 
+  // Support for jbyte Atomic::cmpxchg(jbyte exchange_value, 
+  //                                   volatile jbyte *dest, 
+  //                                   jbyte compare_value)
+      .inline _Atomic_cmpxchg_byte,3
+      movb     %dl, %al      // compare_value
+      lock
+      cmpxchgb %dil, (%rsi)
+      .end
+
   // Support for jint Atomic::cmpxchg(jint exchange_value, 
   //                                  volatile jint *dest, 
   //                                  jint compare_value)
--- a/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/atomic_windows_x86.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -123,6 +123,11 @@
   return (*os::atomic_cmpxchg_func)(exchange_value, dest, compare_value);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  return (*os::atomic_cmpxchg_byte_func)(exchange_value, dest, compare_value);
+}
+
 inline jlong    Atomic::cmpxchg    (jlong    exchange_value, volatile jlong*    dest, jlong    compare_value) {
   return (*os::atomic_cmpxchg_long_func)(exchange_value, dest, compare_value);
 }
@@ -212,6 +217,19 @@
   return (void*)xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
+#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+inline jbyte    Atomic::cmpxchg    (jbyte    exchange_value, volatile jbyte*    dest, jbyte    compare_value) {
+  // alternative for InterlockedCompareExchange
+  int mp = os::is_MP();
+  __asm {
+    mov edx, dest
+    mov cl, exchange_value
+    mov al, compare_value
+    LOCK_IF_MP(mp)
+    cmpxchg byte ptr [edx], cl
+  }
+}
+
 inline jint     Atomic::cmpxchg    (jint     exchange_value, volatile jint*     dest, jint     compare_value) {
   // alternative for InterlockedCompareExchange
   int mp = os::is_MP();
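Note that the MSVC __asm version ends without a C return statement: cl holds the new value, al is preloaded with compare_value, and after cmpxchg the old value sits in al, which is where a char return value lives under the 32-bit Windows calling convention (the file presumably suppresses MSVC's missing-return warning, C4035, for this idiom, as the neighboring jint and jlong versions rely on it too). A hedged sketch of the convention:

// Hypothetical, MSVC/x86 only: whatever an __asm block leaves in al becomes
// the return value of a char-returning function.
static char answer() {
  __asm mov al, 42   // al carries the char return value
}                    // intentionally no return statement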
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -220,6 +220,7 @@
 typedef jint      xchg_func_t            (jint,     volatile jint*);
 typedef intptr_t  xchg_ptr_func_t        (intptr_t, volatile intptr_t*);
 typedef jint      cmpxchg_func_t         (jint,     volatile jint*,  jint);
+typedef jbyte     cmpxchg_byte_func_t    (jbyte,    volatile jbyte*, jbyte);
 typedef jlong     cmpxchg_long_func_t    (jlong,    volatile jlong*, jlong);
 typedef jint      add_func_t             (jint,     volatile jint*);
 typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
@@ -272,6 +273,23 @@
     *dest = exchange_value;
   return old_value;
 }
+
+jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+  // try to use the stub:
+  cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());
+
+  if (func != NULL) {
+    os::atomic_cmpxchg_byte_func = func;
+    return (*func)(exchange_value, dest, compare_value);
+  }
+  assert(Threads::number_of_threads() == 0, "for bootstrap only");
+
+  jbyte old_value = *dest;
+  if (old_value == compare_value)
+    *dest = exchange_value;
+  return old_value;
+}
+
 #endif // AMD64
 
 jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
@@ -321,6 +339,7 @@
 xchg_func_t*         os::atomic_xchg_func         = os::atomic_xchg_bootstrap;
 xchg_ptr_func_t*     os::atomic_xchg_ptr_func     = os::atomic_xchg_ptr_bootstrap;
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
+cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
 add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
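atomic_cmpxchg_byte_bootstrap follows the established bootstrap idiom of this file: the global function pointer initially targets the bootstrap routine, which installs the generated stub the first time it finds one and, until then, falls back to a plain non-atomic update that is safe only while the VM is still single-threaded (hence the Threads::number_of_threads() == 0 assert). A generic, self-contained sketch of the idiom with made-up names:

#include <cstdio>

typedef int add_func_t(int, int);

// Stands in for StubRoutines::..._entry(): NULL until the stub is generated.
static add_func_t* generated_stub = NULL;

static int add_bootstrap(int a, int b);
static add_func_t* add_func = add_bootstrap;  // every caller uses this pointer

static int add_bootstrap(int a, int b) {
  if (generated_stub != NULL) {
    add_func = generated_stub;   // patch the pointer; later calls skip this
    return generated_stub(a, b);
  }
  return a + b;                  // slow, single-threaded startup fallback
}

int main() {
  printf("%d\n", add_func(1, 2)); // dispatches through the bootstrap once
  return 0;
}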
 
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -33,6 +33,7 @@
   static intptr_t  (*atomic_xchg_ptr_func)      (intptr_t,  volatile intptr_t*);
 
   static jint      (*atomic_cmpxchg_func)       (jint,      volatile jint*,  jint);
+  static jbyte     (*atomic_cmpxchg_byte_func)  (jbyte,     volatile jbyte*, jbyte);
   static jlong     (*atomic_cmpxchg_long_func)  (jlong,     volatile jlong*, jlong);
 
   static jint      (*atomic_add_func)           (jint,      volatile jint*);
@@ -42,6 +43,7 @@
   static intptr_t  atomic_xchg_ptr_bootstrap    (intptr_t,  volatile intptr_t*);
 
   static jint      atomic_cmpxchg_bootstrap     (jint,      volatile jint*,  jint);
+  static jbyte     atomic_cmpxchg_byte_bootstrap(jbyte,     volatile jbyte*, jbyte);
 #else
 
   static jlong (*atomic_cmpxchg_long_func)  (jlong, volatile jlong*, jlong);
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -46,6 +46,7 @@
 #include "oops/oop.inline.hpp"
 #include "oops/oop.inline2.hpp"
 #include "runtime/fieldType.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 # include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif
--- a/hotspot/src/share/vm/ci/ciReplay.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/ci/ciReplay.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -332,7 +332,7 @@
   // Lookup a klass
   Klass* resolve_klass(const char* klass, TRAPS) {
     Symbol* klass_name = SymbolTable::lookup(klass, (int)strlen(klass), CHECK_NULL);
-    return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, CHECK_NULL);
+    return SystemDictionary::resolve_or_fail(klass_name, _loader, _protection_domain, true, THREAD);
   }
 
   // Parse the standard tuple of <klass> <name> <signature>
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -31,9 +31,6 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/verificationType.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -63,7 +60,11 @@
 #include "services/threadService.hpp"
 #include "utilities/array.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
+#if INCLUDE_CDS
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 
 // We generally try to create the oops directly when parsing, rather than
 // allocating temporary data structures and copying the bytes twice. A
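The include reshuffle here (repeated in classLoader.cpp and systemDictionary.cpp below) is not cosmetic: INCLUDE_CDS is defined in utilities/macros.hpp, and an #if that tests it before that header has been seen gets an undefined identifier, which the preprocessor silently evaluates as 0. Moving the guarded includes after the macros.hpp include makes the guard reliable. A minimal, genericized sketch of the pitfall (config.hpp and FEATURE_X are made up and stand in for utilities/macros.hpp and INCLUDE_CDS):

// Before the fix: FEATURE_X is still undefined here, so the preprocessor
// treats it as 0 and silently drops the guarded block.
#if FEATURE_X
// feature-only includes would go here -- compiled out by mistake
#endif
#include "config.hpp"   // defines FEATURE_X -- but the guard already ran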
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -29,10 +29,6 @@
 #include "classfile/classLoaderExt.hpp"
 #include "classfile/classLoaderData.inline.hpp"
 #include "classfile/javaClasses.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedPathsMiscInfo.hpp"
-#include "classfile/sharedClassUtil.hpp"
-#endif
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
@@ -64,8 +60,13 @@
 #include "services/management.hpp"
 #include "services/threadService.hpp"
 #include "utilities/events.hpp"
-#include "utilities/hashtable.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#endif
 
 // Entry points in zip.dll for loading zip/jar file entries
 
--- a/hotspot/src/share/vm/classfile/classLoader.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -27,6 +27,7 @@
 
 #include "classfile/classFileParser.hpp"
 #include "runtime/perfData.hpp"
+#include "utilities/macros.hpp"
 
 // The VM class loader.
 #include <sys/stat.h>
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -472,7 +472,7 @@
 // These anonymous class loaders are to contain classes used for JSR292
 ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
   // Add a new class loader data to the graph.
-  return ClassLoaderDataGraph::add(loader, true, CHECK_NULL);
+  return ClassLoaderDataGraph::add(loader, true, THREAD);
 }
 
 const char* ClassLoaderData::loader_name() {
--- a/hotspot/src/share/vm/classfile/defaultMethods.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/defaultMethods.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -493,7 +493,7 @@
 };
 
 Symbol* MethodFamily::generate_no_defaults_message(TRAPS) const {
-  return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
+  return SymbolTable::new_symbol("No qualifying defaults found", THREAD);
 }
 
 Symbol* MethodFamily::generate_method_message(Symbol *klass_name, Method* method, TRAPS) const {
@@ -506,7 +506,7 @@
   ss.write((const char*)name->bytes(), name->utf8_length());
   ss.write((const char*)signature->bytes(), signature->utf8_length());
   ss.print(" is abstract");
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
 }
 
 Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
@@ -521,7 +521,7 @@
     ss.print(".");
     ss.write((const char*)name->bytes(), name->utf8_length());
   }
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
+  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
 }
 
 
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -1952,7 +1952,7 @@
   // This class is eagerly initialized during VM initialization, since we keep a reference
   // to one of the methods
   assert(InstanceKlass::cast(klass)->is_initialized(), "must be initialized");
-  return InstanceKlass::cast(klass)->allocate_instance_handle(CHECK_NH);
+  return InstanceKlass::cast(klass)->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Method::clazz(oop reflect) {
@@ -2130,7 +2130,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Constructor::clazz(oop reflect) {
@@ -2270,7 +2270,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Field::clazz(oop reflect) {
@@ -2397,7 +2397,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 oop java_lang_reflect_Parameter::name(oop param) {
@@ -2447,7 +2447,7 @@
   instanceKlassHandle klass (THREAD, k);
   // Ensure it is initialized
   klass->initialize(CHECK_NH);
-  return klass->allocate_instance_handle(CHECK_NH);
+  return klass->allocate_instance_handle(THREAD);
 }
 
 
--- a/hotspot/src/share/vm/classfile/stringTable.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/stringTable.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -36,6 +36,7 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "utilities/hashtable.inline.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1StringDedup.hpp"
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -235,7 +235,7 @@
   MutexLocker ml(SymbolTable_lock, THREAD);
 
   // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, THREAD);
 }
 
 Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
@@ -274,7 +274,7 @@
   // Grab SymbolTable_lock first.
   MutexLocker ml(SymbolTable_lock, THREAD);
 
-  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, CHECK_NULL);
+  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, THREAD);
 }
 
 Symbol* SymbolTable::lookup_only(const char* name, int len,
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -31,10 +31,6 @@
 #include "classfile/resolutionErrors.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedClassUtil.hpp"
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/bytecodeStream.hpp"
@@ -65,7 +61,10 @@
 #include "services/threadService.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ticks.hpp"
-
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #if INCLUDE_TRACE
  #include "trace/tracing.hpp"
 #endif
@@ -123,7 +122,7 @@
 
 ClassLoaderData* SystemDictionary::register_loader(Handle class_loader, TRAPS) {
   if (class_loader() == NULL) return ClassLoaderData::the_null_class_loader_data();
-  return ClassLoaderDataGraph::find_or_create(class_loader, CHECK_NULL);
+  return ClassLoaderDataGraph::find_or_create(class_loader, THREAD);
 }
 
 // ----------------------------------------------------------------------------
@@ -233,15 +232,15 @@
                  class_name->as_C_string(),
                  class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string()));
   if (FieldType::is_array(class_name)) {
-    return resolve_array_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_array_class_or_null(class_name, class_loader, protection_domain, THREAD);
   } else if (FieldType::is_obj(class_name)) {
     ResourceMark rm(THREAD);
     // Ignore wrapping L and ;.
     TempNewSymbol name = SymbolTable::new_symbol(class_name->as_C_string() + 1,
                                    class_name->utf8_length() - 2, CHECK_NULL);
-    return resolve_instance_class_or_null(name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(name, class_loader, protection_domain, THREAD);
   } else {
-    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, CHECK_NULL);
+    return resolve_instance_class_or_null(class_name, class_loader, protection_domain, THREAD);
   }
 }
 
--- a/hotspot/src/share/vm/classfile/verificationType.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/verificationType.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -289,7 +289,7 @@
           if (is_reference() && from.is_reference()) {
             return is_reference_assignable_from(from, context,
                                                 from_field_is_protected,
-                                                CHECK_false);
+                                                THREAD);
           } else {
             return false;
           }
--- a/hotspot/src/share/vm/classfile/verifier.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/classfile/verifier.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -1927,7 +1927,7 @@
 
   return SystemDictionary::resolve_or_fail(
     name, Handle(THREAD, loader), Handle(THREAD, protection_domain),
-    true, CHECK_NULL);
+    true, THREAD);
 }
 
 bool ClassVerifier::is_protected_access(instanceKlassHandle this_class,
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -192,7 +192,6 @@
      FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
   CardGeneration(rs, initial_byte_size, level, ct),
   _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _debug_collection_type(Concurrent_collection_type),
   _did_compact(false)
 {
   HeapWord* bottom = (HeapWord*) _virtual_space.low();
@@ -612,8 +611,6 @@
   // Clip CMSBootstrapOccupancy between 0 and 100.
   _bootstrap_occupancy = ((double)CMSBootstrapOccupancy)/(double)100;
 
-  _full_gcs_since_conc_gc = 0;
-
   // Now tell CMS generations the identity of their collector
   ConcurrentMarkSweepGeneration::set_collector(this);
 
@@ -1247,20 +1244,6 @@
     return true;
   }
 
-  // For debugging purposes, change the type of collection.
-  // If the rotation is not on the concurrent collection
-  // type, don't start a concurrent collection.
-  NOT_PRODUCT(
-    if (RotateCMSCollectionTypes &&
-        (_cmsGen->debug_collection_type() !=
-          ConcurrentMarkSweepGeneration::Concurrent_collection_type)) {
-      assert(_cmsGen->debug_collection_type() !=
-        ConcurrentMarkSweepGeneration::Unknown_collection_type,
-        "Bad cms collection type");
-      return false;
-    }
-  )
-
   FreelistLocker x(this);
   // ------------------------------------------------------------------
   // Print out lots of information which affects the initiation of
@@ -1441,16 +1424,6 @@
                            size_t size,
                            bool   tlab)
 {
-  if (!UseCMSCollectionPassing && _collectorState > Idling) {
-    // For debugging purposes skip the collection if the state
-    // is not currently idle
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " skipped full:%d CMS state %d",
-        Thread::current(), full, _collectorState);
-    }
-    return;
-  }
-
   // The following "if" branch is present for defensive reasons.
   // In the current uses of this interface, it can be replaced with:
   // assert(!GC_locker.is_active(), "Can't be called otherwise");
@@ -1466,7 +1439,6 @@
     return;
   }
   acquire_control_and_collect(full, clear_all_soft_refs);
-  _full_gcs_since_conc_gc++;
 }
 
 void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
@@ -1636,66 +1608,52 @@
     gclog_or_tty->print_cr("    gets control with state %d", _collectorState);
   }
 
-  // Check if we need to do a compaction, or if not, whether
-  // we need to start the mark-sweep from scratch.
-  bool should_compact    = false;
-  bool should_start_over = false;
-  decide_foreground_collection_type(clear_all_soft_refs,
-    &should_compact, &should_start_over);
-
-NOT_PRODUCT(
-  if (RotateCMSCollectionTypes) {
-    if (_cmsGen->debug_collection_type() ==
-        ConcurrentMarkSweepGeneration::MSC_foreground_collection_type) {
-      should_compact = true;
-    } else if (_cmsGen->debug_collection_type() ==
-               ConcurrentMarkSweepGeneration::MS_foreground_collection_type) {
-      should_compact = false;
-    }
-  }
-)
+  // Inform cms gen if this was due to partial collection failing.
+  // The CMS gen may use this fact to determine its expansion policy.
+  GenCollectedHeap* gch = GenCollectedHeap::heap();
+  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
+    assert(!_cmsGen->incremental_collection_failed(),
+           "Should have been noticed, reacted to and cleared");
+    _cmsGen->set_incremental_collection_failed();
+  }
 
   if (first_state > Idling) {
     report_concurrent_mode_interruption();
   }
 
-  set_did_compact(should_compact);
-  if (should_compact) {
-    // If the collection is being acquired from the background
-    // collector, there may be references on the discovered
-    // references lists that have NULL referents (being those
-    // that were concurrently cleared by a mutator) or
-    // that are no longer active (having been enqueued concurrently
-    // by the mutator).
-    // Scrub the list of those references because Mark-Sweep-Compact
-    // code assumes referents are not NULL and that all discovered
-    // Reference objects are active.
-    ref_processor()->clean_up_discovered_references();
-
-    if (first_state > Idling) {
-      save_heap_summary();
-    }
-
-    do_compaction_work(clear_all_soft_refs);
-
-    // Has the GC time limit been exceeded?
-    DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
-    size_t max_eden_size = young_gen->max_capacity() -
-                           young_gen->to()->capacity() -
-                           young_gen->from()->capacity();
-    GenCollectedHeap* gch = GenCollectedHeap::heap();
-    GCCause::Cause gc_cause = gch->gc_cause();
-    size_policy()->check_gc_overhead_limit(_young_gen->used(),
-                                           young_gen->eden()->used(),
-                                           _cmsGen->max_capacity(),
-                                           max_eden_size,
-                                           full,
-                                           gc_cause,
-                                           gch->collector_policy());
-  } else {
-    do_mark_sweep_work(clear_all_soft_refs, first_state,
-      should_start_over);
-  }
+  set_did_compact(true);
+
+  // If the collection is being acquired from the background
+  // collector, there may be references on the discovered
+  // references lists that have NULL referents (being those
+  // that were concurrently cleared by a mutator) or
+  // that are no longer active (having been enqueued concurrently
+  // by the mutator).
+  // Scrub the list of those references because Mark-Sweep-Compact
+  // code assumes referents are not NULL and that all discovered
+  // Reference objects are active.
+  ref_processor()->clean_up_discovered_references();
+
+  if (first_state > Idling) {
+    save_heap_summary();
+  }
+
+  do_compaction_work(clear_all_soft_refs);
+
+  // Has the GC time limit been exceeded?
+  DefNewGeneration* young_gen = _young_gen->as_DefNewGeneration();
+  size_t max_eden_size = young_gen->max_capacity() -
+                         young_gen->to()->capacity() -
+                         young_gen->from()->capacity();
+  GCCause::Cause gc_cause = gch->gc_cause();
+  size_policy()->check_gc_overhead_limit(_young_gen->used(),
+                                         young_gen->eden()->used(),
+                                         _cmsGen->max_capacity(),
+                                         max_eden_size,
+                                         full,
+                                         gc_cause,
+                                         gch->collector_policy());
+
   // Reset the expansion cause, now that we just completed
   // a collection cycle.
   clear_expansion_cause();
@@ -1713,68 +1671,6 @@
   _cmsGen->compute_new_size_free_list();
 }
 
-// A work method used by foreground collection to determine
-// what type of collection (compacting or not, continuing or fresh)
-// it should do.
-// NOTE: the intent is to make UseCMSCompactAtFullCollection
-// and CMSCompactWhenClearAllSoftRefs the default in the future
-// and do away with the flags after a suitable period.
-void CMSCollector::decide_foreground_collection_type(
-  bool clear_all_soft_refs, bool* should_compact,
-  bool* should_start_over) {
-  // Normally, we'll compact only if the UseCMSCompactAtFullCollection
-  // flag is set, and we have either requested a System.gc() or
-  // the number of full gc's since the last concurrent cycle
-  // has exceeded the threshold set by CMSFullGCsBeforeCompaction,
-  // or if an incremental collection has failed
-  GenCollectedHeap* gch = GenCollectedHeap::heap();
-  assert(gch->collector_policy()->is_generation_policy(),
-         "You may want to check the correctness of the following");
-  // Inform cms gen if this was due to partial collection failing.
-  // The CMS gen may use this fact to determine its expansion policy.
-  if (gch->incremental_collection_will_fail(false /* don't consult_young */)) {
-    assert(!_cmsGen->incremental_collection_failed(),
-           "Should have been noticed, reacted to and cleared");
-    _cmsGen->set_incremental_collection_failed();
-  }
-  *should_compact =
-    UseCMSCompactAtFullCollection &&
-    ((_full_gcs_since_conc_gc >= CMSFullGCsBeforeCompaction) ||
-     GCCause::is_user_requested_gc(gch->gc_cause()) ||
-     gch->incremental_collection_will_fail(true /* consult_young */));
-  *should_start_over = false;
-  if (clear_all_soft_refs && !*should_compact) {
-    // We are about to do a last ditch collection attempt
-    // so it would normally make sense to do a compaction
-    // to reclaim as much space as possible.
-    if (CMSCompactWhenClearAllSoftRefs) {
-      // Default: The rationale is that in this case either
-      // we are past the final marking phase, in which case
-      // we'd have to start over, or so little has been done
-      // that there's little point in saving that work. Compaction
-      // appears to be the sensible choice in either case.
-      *should_compact = true;
-    } else {
-      // We have been asked to clear all soft refs, but not to
-      // compact. Make sure that we aren't past the final checkpoint
-      // phase, for that is where we process soft refs. If we are already
-      // past that phase, we'll need to redo the refs discovery phase and
-      // if necessary clear soft refs that weren't previously
-      // cleared. We do so by remembering the phase in which
-      // we came in, and if we are past the refs processing
-      // phase, we'll choose to just redo the mark-sweep
-      // collection from scratch.
-      if (_collectorState > FinalMarking) {
-        // We are past the refs processing phase;
-        // start over and do a fresh synchronous CMS cycle
-        _collectorState = Resetting; // skip to reset to start new cycle
-        reset(false /* == !asynch */);
-        *should_start_over = true;
-      } // else we can continue a possibly ongoing current cycle
-    }
-  }
-}
-
 // A work method used by the foreground collector to do
 // a mark-sweep-compact.
 void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
@@ -1787,10 +1683,6 @@
   gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
 
   GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL, gc_tracer->gc_id());
-  if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
-    gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
-      "collections passed to foreground collector", _full_gcs_since_conc_gc);
-  }
 
   // Temporarily widen the span of the weak reference processing to
   // the entire heap.
@@ -1852,7 +1744,7 @@
   _collectorState = Resetting;
   assert(_restart_addr == NULL,
          "Should have been NULL'd before baton was passed");
-  reset(false /* == !asynch */);
+  reset(false /* == !concurrent */);
   _cmsGen->reset_after_compaction();
   _concurrent_cycles_since_last_unload = 0;
 
@@ -1875,40 +1767,6 @@
   // in the heap's do_collection() method.
 }
 
-// A work method used by the foreground collector to do
-// a mark-sweep, after taking over from a possibly on-going
-// concurrent mark-sweep collection.
-void CMSCollector::do_mark_sweep_work(bool clear_all_soft_refs,
-  CollectorState first_state, bool should_start_over) {
-  if (PrintGC && Verbose) {
-    gclog_or_tty->print_cr("Pass concurrent collection to foreground "
-      "collector with count %d",
-      _full_gcs_since_conc_gc);
-  }
-  switch (_collectorState) {
-    case Idling:
-      if (first_state == Idling || should_start_over) {
-        // The background GC was not active, or should
-        // restarted from scratch;  start the cycle.
-        _collectorState = InitialMarking;
-      }
-      // If first_state was not Idling, then a background GC
-      // was in progress and has now finished.  No need to do it
-      // again.  Leave the state as Idling.
-      break;
-    case Precleaning:
-      // In the foreground case don't do the precleaning since
-      // it is not done concurrently and there is extra work
-      // required.
-      _collectorState = FinalMarking;
-  }
-  collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
-
-  // For a mark-sweep, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
-
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
   ContiguousSpace* eden_space = dng->eden();
@@ -1989,13 +1847,7 @@
   }
 };
 
-// There are separate collect_in_background and collect_in_foreground because of
-// the different locking requirements of the background collector and the
-// foreground collector.  There was originally an attempt to share
-// one "collect" method between the background collector and the foreground
-// collector but the if-then-else required made it cleaner to have
-// separate methods.
-void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
+void CMSCollector::collect_in_background(GCCause::Cause cause) {
   assert(Thread::current()->is_ConcurrentGC_thread(),
     "A CMS asynchronous collection is only allowed on a CMS thread.");
 
@@ -2036,7 +1888,7 @@
   // Used for PrintGC
   size_t prev_used;
   if (PrintGC && Verbose) {
-    prev_used = _cmsGen->used(); // XXXPERM
+    prev_used = _cmsGen->used();
   }
 
   // The change of the collection state is normally done at this level;
@@ -2116,7 +1968,7 @@
         break;
       case Marking:
         // initial marking in checkpointRootsInitialWork has been completed
-        if (markFromRoots(true)) { // we were successful
+        if (markFromRoots()) { // we were successful
           assert(_collectorState == Precleaning, "Collector state should "
             "have changed");
         } else {
@@ -2146,10 +1998,9 @@
         break;
       case Sweeping:
         // final marking in checkpointRootsFinal has been completed
-        sweep(true);
+        sweep();
         assert(_collectorState == Resizing, "Collector state change "
           "to Resizing must be done under the free_list_lock");
-        _full_gcs_since_conc_gc = 0;
 
       case Resizing: {
         // Sweeping has been completed...
@@ -2222,12 +2073,6 @@
   }
 }
 
-void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
-  if (!_cms_start_registered) {
-    register_gc_start(cause);
-  }
-}
-
 void CMSCollector::register_gc_start(GCCause::Cause cause) {
   _cms_start_registered = true;
   _gc_timer_cm->register_gc_start();
@@ -2255,120 +2100,6 @@
   _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
 }
 
-void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
-  assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
-         "Foreground collector should be waiting, not executing");
-  assert(Thread::current()->is_VM_thread(), "A foreground collection"
-    "may only be done by the VM Thread with the world stopped");
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-
-  // The gc id is created in register_foreground_gc_start if this collection is synchronous
-  const GCId gc_id = _collectorState == InitialMarking ? GCId::peek() : _gc_tracer_cm->gc_id();
-  NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
-    true, NULL, gc_id);)
-  COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact);
-
-  HandleMark hm;  // Discard invalid handles created during verification
-
-  if (VerifyBeforeGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-
-  // Snapshot the soft reference policy to be used in this collection cycle.
-  ref_processor()->setup_policy(clear_all_soft_refs);
-
-  // Decide if class unloading should be done
-  update_should_unload_classes();
-
-  bool init_mark_was_synchronous = false; // until proven otherwise
-  while (_collectorState != Idling) {
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("Thread " INTPTR_FORMAT " in CMS state %d",
-        Thread::current(), _collectorState);
-    }
-    switch (_collectorState) {
-      case InitialMarking:
-        register_foreground_gc_start(cause);
-        init_mark_was_synchronous = true;  // fact to be exploited in re-mark
-        checkpointRootsInitial(false);
-        assert(_collectorState == Marking, "Collector state should have changed"
-          " within checkpointRootsInitial()");
-        break;
-      case Marking:
-        // initial marking in checkpointRootsInitialWork has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before initial mark: ");
-        }
-        {
-          bool res = markFromRoots(false);
-          assert(res && _collectorState == FinalMarking, "Collector state should "
-            "have changed");
-          break;
-        }
-      case FinalMarking:
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before re-mark: ");
-        }
-        checkpointRootsFinal(false, clear_all_soft_refs,
-                             init_mark_was_synchronous);
-        assert(_collectorState == Sweeping, "Collector state should not "
-          "have changed within checkpointRootsFinal()");
-        break;
-      case Sweeping:
-        // final marking in checkpointRootsFinal has been completed
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before sweep: ");
-        }
-        sweep(false);
-        assert(_collectorState == Resizing, "Incorrect state");
-        break;
-      case Resizing: {
-        // Sweeping has been completed; the actual resize in this case
-        // is done separately; nothing to be done in this state.
-        _collectorState = Resetting;
-        break;
-      }
-      case Resetting:
-        // The heap has been resized.
-        if (VerifyDuringGC &&
-            GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-          Universe::verify("Verify before reset: ");
-        }
-        save_heap_summary();
-        reset(false);
-        assert(_collectorState == Idling, "Collector state should "
-          "have changed");
-        break;
-      case Precleaning:
-      case AbortablePreclean:
-        // Elide the preclean phase
-        _collectorState = FinalMarking;
-        break;
-      default:
-        ShouldNotReachHere();
-    }
-    if (TraceCMSState) {
-      gclog_or_tty->print_cr("  Thread " INTPTR_FORMAT " done - next CMS state %d",
-        Thread::current(), _collectorState);
-    }
-  }
-
-  if (VerifyAfterGC &&
-      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    Universe::verify();
-  }
-  if (TraceCMSState) {
-    gclog_or_tty->print_cr("CMS Thread " INTPTR_FORMAT
-      " exiting collection CMS state %d",
-      Thread::current(), _collectorState);
-  }
-}
-
 bool CMSCollector::waitForForegroundGC() {
   bool res = false;
   assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
@@ -3345,7 +3076,7 @@
 // Checkpoint the roots into this generation from outside
 // this generation. [Note this initial checkpoint need only
 // be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial(bool asynch) {
+void CMSCollector::checkpointRootsInitial() {
   assert(_collectorState == InitialMarking, "Wrong collector state");
   check_correct_thread_executing();
   TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
@@ -3356,32 +3087,19 @@
   ReferenceProcessor* rp = ref_processor();
   SpecializationStats::clear();
   assert(_restart_addr == NULL, "Control point invariant");
-  if (asynch) {
+  {
     // acquire locks for subsequent manipulations
     MutexLockerEx x(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
-    checkpointRootsInitialWork(asynch);
+    checkpointRootsInitialWork();
     // enable ("weak") refs discovery
     rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
     _collectorState = Marking;
-  } else {
-    // (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
-    // which recognizes if we are a CMS generation, and doesn't try to turn on
-    // discovery; verify that they aren't meddling.
-    assert(!rp->discovery_is_atomic(),
-           "incorrect setting of discovery predicate");
-    assert(!rp->discovery_enabled(), "genCollectedHeap shouldn't control "
-           "ref discovery for this generation kind");
-    // already have locks
-    checkpointRootsInitialWork(asynch);
-    // now enable ("weak") refs discovery
-    rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
-    _collectorState = Marking;
   }
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsInitialWork(bool asynch) {
+void CMSCollector::checkpointRootsInitialWork() {
   assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
   assert(_collectorState == InitialMarking, "just checking");
 
@@ -3483,9 +3201,9 @@
   verify_overflow_empty();
 }
 
-bool CMSCollector::markFromRoots(bool asynch) {
+bool CMSCollector::markFromRoots() {
   // we might be tempted to assert that:
-  // assert(asynch == !SafepointSynchronize::is_at_safepoint(),
+  // assert(!SafepointSynchronize::is_at_safepoint(),
   //        "inconsistent argument?");
   // However that wouldn't be right, because it's possible that
   // a safepoint is indeed in progress as a younger generation
@@ -3494,37 +3212,28 @@
   check_correct_thread_executing();
   verify_overflow_empty();
 
-  bool res;
-  if (asynch) {
-    // Weak ref discovery note: We may be discovering weak
-    // refs in this generation concurrent (but interleaved) with
-    // weak ref discovery by a younger generation collector.
-
-    CMSTokenSyncWithLocks ts(true, bitMapLock());
-    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
-    res = markFromRootsWork(asynch);
-    if (res) {
-      _collectorState = Precleaning;
-    } else { // We failed and a foreground collection wants to take over
-      assert(_foregroundGCIsActive, "internal state inconsistency");
-      assert(_restart_addr == NULL,  "foreground will restart from scratch");
-      if (PrintGCDetails) {
-        gclog_or_tty->print_cr("bailing out to foreground collection");
-      }
-    }
-  } else {
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "inconsistent with asynch == false");
-    // already have locks
-    res = markFromRootsWork(asynch);
-    _collectorState = FinalMarking;
+  // Weak ref discovery note: We may be discovering weak
+  // refs in this generation concurrent (but interleaved) with
+  // weak ref discovery by a younger generation collector.
+
+  CMSTokenSyncWithLocks ts(true, bitMapLock());
+  TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
+  CMSPhaseAccounting pa(this, "mark", _gc_tracer_cm->gc_id(), !PrintGCDetails);
+  bool res = markFromRootsWork();
+  if (res) {
+    _collectorState = Precleaning;
+  } else { // We failed and a foreground collection wants to take over
+    assert(_foregroundGCIsActive, "internal state inconsistency");
+    assert(_restart_addr == NULL,  "foreground will restart from scratch");
+    if (PrintGCDetails) {
+      gclog_or_tty->print_cr("bailing out to foreground collection");
+    }
   }
   verify_overflow_empty();
   return res;
 }
 
-bool CMSCollector::markFromRootsWork(bool asynch) {
+bool CMSCollector::markFromRootsWork() {
   // iterate over marked bits in bit map, doing a full scan and mark
   // from these roots using the following algorithm:
   // . if oop is to the right of the current scan pointer,
@@ -3549,9 +3258,9 @@
   verify_overflow_empty();
   bool result = false;
   if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
-    result = do_marking_mt(asynch);
+    result = do_marking_mt();
   } else {
-    result = do_marking_st(asynch);
+    result = do_marking_st();
   }
   return result;
 }
@@ -3591,7 +3300,6 @@
 class CMSConcMarkingTask: public YieldingFlexibleGangTask {
   CMSCollector* _collector;
   int           _n_workers;                  // requested/desired # workers
-  bool          _asynch;
   bool          _result;
   CompactibleFreeListSpace*  _cms_space;
   char          _pad_front[64];   // padding to ...
@@ -3612,13 +3320,12 @@
  public:
   CMSConcMarkingTask(CMSCollector* collector,
                  CompactibleFreeListSpace* cms_space,
-                 bool asynch,
                  YieldingFlexibleWorkGang* workers,
                  OopTaskQueueSet* task_queues):
     YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
     _collector(collector),
     _cms_space(cms_space),
-    _asynch(asynch), _n_workers(0), _result(true),
+    _n_workers(0), _result(true),
     _task_queues(task_queues),
     _term(_n_workers, task_queues, _collector),
     _bit_map_lock(collector->bitMapLock())
@@ -3645,8 +3352,7 @@
   void work(uint worker_id);
   bool should_yield() {
     return    ConcurrentMarkSweepThread::should_yield()
-           && !_collector->foregroundGCIsActive()
-           && _asynch;
+           && !_collector->foregroundGCIsActive();
   }
 
   virtual void coordinator_yield();  // stuff done by coordinator
@@ -3878,8 +3584,7 @@
         Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                     &_collector->_markBitMap,
                                     work_queue(i),
-                                    &_collector->_markStack,
-                                    _asynch);
+                                    &_collector->_markStack);
         _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
       } // else nothing to do for this task
     }   // else nothing to do for this task
@@ -4084,7 +3789,7 @@
   _collector->startTimer();
 }
 
-bool CMSCollector::do_marking_mt(bool asynch) {
+bool CMSCollector::do_marking_mt() {
   assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
   int num_workers = AdaptiveSizePolicy::calc_active_conc_workers(
                                        conc_workers()->total_workers(),
@@ -4096,7 +3801,6 @@
 
   CMSConcMarkingTask tsk(this,
                          cms_space,
-                         asynch,
                          conc_workers(),
                          task_queues());
 
@@ -4125,7 +3829,7 @@
     // If _restart_addr is non-NULL, a marking stack overflow
     // occurred; we need to do a fresh marking iteration from the
     // indicated restart address.
-    if (_foregroundGCIsActive && asynch) {
+    if (_foregroundGCIsActive) {
       // We may be running into repeated stack overflows, having
       // reached the limit of the stack size, while making very
       // slow forward progress. It may be best to bail out and
@@ -4154,14 +3858,14 @@
   return true;
 }
 
-bool CMSCollector::do_marking_st(bool asynch) {
+bool CMSCollector::do_marking_st() {
   ResourceMark rm;
   HandleMark   hm;
 
   // Temporarily make refs discovery single threaded (non-MT)
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
   MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
-    &_markStack, CMSYield && asynch);
+    &_markStack, CMSYield);
   // the last argument to iterate indicates whether the iteration
   // should be incremental with periodic yields.
   _markBitMap.iterate(&markFromRootsClosure);
@@ -4169,7 +3873,7 @@
   // occurred; we need to do a fresh iteration from the
   // indicated restart address.
   while (_restart_addr != NULL) {
-    if (_foregroundGCIsActive && asynch) {
+    if (_foregroundGCIsActive) {
       // We may be running into repeated stack overflows, having
       // reached the limit of the stack size, while making very
       // slow forward progress. It may be best to bail out and
@@ -4703,8 +4407,7 @@
   verify_overflow_empty();
 }
 
-void CMSCollector::checkpointRootsFinal(bool asynch,
-  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
+void CMSCollector::checkpointRootsFinal() {
   assert(_collectorState == FinalMarking, "incorrect state transition?");
   check_correct_thread_executing();
   // world is stopped at this checkpoint
@@ -4721,7 +4424,7 @@
                         _young_gen->used() / K,
                         _young_gen->capacity() / K);
   }
-  if (asynch) {
+  {
     if (CMSScavengeBeforeRemark) {
       GenCollectedHeap* gch = GenCollectedHeap::heap();
       // Temporarily set flag to false, GCH->do_collection will
@@ -4742,21 +4445,14 @@
     FreelistLocker x(this);
     MutexLockerEx y(bitMapLock(),
                     Mutex::_no_safepoint_check_flag);
-    assert(!init_mark_was_synchronous, "but that's impossible!");
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs, false);
-  } else {
-    // already have all the locks
-    checkpointRootsFinalWork(asynch, clear_all_soft_refs,
-                             init_mark_was_synchronous);
+    checkpointRootsFinalWork();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
   SpecializationStats::print();
 }
 
-void CMSCollector::checkpointRootsFinalWork(bool asynch,
-  bool clear_all_soft_refs, bool init_mark_was_synchronous) {
-
+void CMSCollector::checkpointRootsFinalWork() {
   NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
 
   assert(haveFreelistLocks(), "must have free list locks");
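
Note the bare block that the hunk above keeps where "if (asynch) {" used to
be: the FreelistLocker and MutexLockerEx are RAII objects, so preserving the
braces preserves exactly where the locks are released before the verification
calls. A minimal illustration with standard C++ stand-ins for the locker
types:

    #include <mutex>

    std::mutex bit_map_lock;                  // stand-in for bitMapLock()

    void checkpoint_like() {
      {                                       // was: if (asynch) {
        std::lock_guard<std::mutex> y(bit_map_lock);
        // ... remark work performed while the lock is held ...
      }                                       // lock released here, as before
      // verification below runs without the lock, exactly as it did
    }
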
@@ -4773,60 +4469,54 @@
   assert(haveFreelistLocks(), "must have free list locks");
   assert_lock_strong(bitMapLock());
 
-  if (!init_mark_was_synchronous) {
-    // We might assume that we need not fill TLAB's when
-    // CMSScavengeBeforeRemark is set, because we may have just done
-    // a scavenge which would have filled all TLAB's -- and besides
-    // Eden would be empty. This however may not always be the case --
-    // for instance although we asked for a scavenge, it may not have
-    // happened because of a JNI critical section. We probably need
-    // a policy for deciding whether we can in that case wait until
-    // the critical section releases and then do the remark following
-    // the scavenge, and skip it here. In the absence of that policy,
-    // or of an indication of whether the scavenge did indeed occur,
-    // we cannot rely on TLAB's having been filled and must do
-    // so here just in case a scavenge did not happen.
-    gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
-    // Update the saved marks which may affect the root scans.
-    gch->save_marks();
-
-    if (CMSPrintEdenSurvivorChunks) {
-      print_eden_and_survivor_chunk_arrays();
-    }
-
-    {
-      COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
-
-      // Note on the role of the mod union table:
-      // Since the marker in "markFromRoots" marks concurrently with
-      // mutators, it is possible for some reachable objects not to have been
-      // scanned. For instance, an only reference to an object A was
-      // placed in object B after the marker scanned B. Unless B is rescanned,
-      // A would be collected. Such updates to references in marked objects
-      // are detected via the mod union table which is the set of all cards
-      // dirtied since the first checkpoint in this GC cycle and prior to
-      // the most recent young generation GC, minus those cleaned up by the
-      // concurrent precleaning.
-      if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
-        GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
-        do_remark_parallel();
-      } else {
-        GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
-                    _gc_timer_cm, _gc_tracer_cm->gc_id());
-        do_remark_non_parallel();
-      }
-    }
-  } else {
-    assert(!asynch, "Can't have init_mark_was_synchronous in asynch mode");
-    // The initial mark was stop-world, so there's no rescanning to
-    // do; go straight on to the next step below.
+  // We might assume that we need not fill TLAB's when
+  // CMSScavengeBeforeRemark is set, because we may have just done
+  // a scavenge which would have filled all TLAB's -- and besides
+  // Eden would be empty. This however may not always be the case --
+  // for instance although we asked for a scavenge, it may not have
+  // happened because of a JNI critical section. We probably need
+  // a policy for deciding whether we can in that case wait until
+  // the critical section releases and then do the remark following
+  // the scavenge, and skip it here. In the absence of that policy,
+  // or of an indication of whether the scavenge did indeed occur,
+  // we cannot rely on TLAB's having been filled and must do
+  // so here just in case a scavenge did not happen.
+  gch->ensure_parsability(false);  // fill TLAB's, but no need to retire them
+  // Update the saved marks which may affect the root scans.
+  gch->save_marks();
+
+  if (CMSPrintEdenSurvivorChunks) {
+    print_eden_and_survivor_chunk_arrays();
+  }
+
+  {
+    COMPILER2_PRESENT(DerivedPointerTableDeactivate dpt_deact;)
+
+    // Note on the role of the mod union table:
+    // Since the marker in "markFromRoots" marks concurrently with
+    // mutators, it is possible for some reachable objects not to have been
+    // scanned. For instance, the only reference to an object A was
+    // placed in object B after the marker scanned B. Unless B is rescanned,
+    // A would be collected. Such updates to references in marked objects
+    // are detected via the mod union table which is the set of all cards
+    // dirtied since the first checkpoint in this GC cycle and prior to
+    // the most recent young generation GC, minus those cleaned up by the
+    // concurrent precleaning.
+    if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
+      GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());
+      do_remark_parallel();
+    } else {
+      GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
+                  _gc_timer_cm, _gc_tracer_cm->gc_id());
+      do_remark_non_parallel();
+    }
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
 
   {
     NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm, _gc_tracer_cm->gc_id());)
-    refProcessingWork(asynch, clear_all_soft_refs);
+    refProcessingWork();
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
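
The mod union table comment above pairs well with a concrete picture: a write
barrier dirties one card per updated reference while concurrent marking runs,
and remark rescans exactly those cards. A self-contained sketch under invented
names (the 512-byte card matches HotSpot's default card size; nothing else
here is HotSpot code):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    const size_t CARD_SHIFT = 9;             // 512-byte cards

    struct ModUnionSketch {
      uintptr_t heap_base;
      std::vector<bool> dirty;               // one bit per card

      // Write-barrier side: a reference field at 'addr' was updated
      // while the concurrent marker may already have scanned it.
      void mark_card(uintptr_t addr) {
        dirty[(addr - heap_base) >> CARD_SHIFT] = true;
      }

      // Remark side: rescan only the dirtied cards, then clean them
      // for the next cycle.
      template <typename RescanFn>
      void rescan_dirty(RescanFn rescan) {
        for (size_t i = 0; i < dirty.size(); ++i) {
          if (dirty[i]) {
            rescan(heap_base + (i << CARD_SHIFT), size_t(1) << CARD_SHIFT);
            dirty[i] = false;
          }
        }
      }
    };
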
@@ -5872,8 +5562,7 @@
   workers->run_task(&enq_task);
 }
 
-void CMSCollector::refProcessingWork(bool asynch, bool clear_all_soft_refs) {
-
+void CMSCollector::refProcessingWork() {
   ResourceMark rm;
   HandleMark   hm;
 
@@ -5881,7 +5570,7 @@
   assert(rp->span().equals(_span), "Spans should be equal");
   assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
   // Process weak references.
-  rp->setup_policy(clear_all_soft_refs);
+  rp->setup_policy(false);
   verify_work_stacks_empty();
 
   CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
@@ -6005,7 +5694,7 @@
 }
 #endif
 
-void CMSCollector::sweep(bool asynch) {
+void CMSCollector::sweep() {
   assert(_collectorState == Sweeping, "just checking");
   check_correct_thread_executing();
   verify_work_stacks_empty();
@@ -6019,14 +5708,14 @@
   assert(!_intra_sweep_timer.is_active(), "Should not be active");
   _intra_sweep_timer.reset();
   _intra_sweep_timer.start();
-  if (asynch) {
+  {
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
     CMSPhaseAccounting pa(this, "sweep", _gc_tracer_cm->gc_id(), !PrintGCDetails);
     // First sweep the old gen
     {
       CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
                                bitMapLock());
-      sweepWork(_cmsGen, asynch);
+      sweepWork(_cmsGen);
     }
 
     // Update Universe::_heap_*_at_gc figures.
@@ -6040,13 +5729,6 @@
       Universe::update_heap_info_at_gc();
       _collectorState = Resizing;
     }
-  } else {
-    // already have needed locks
-    sweepWork(_cmsGen,  asynch);
-    // Update heap occupancy information which is used as
-    // input to soft ref clearing policy at the next gc.
-    Universe::update_heap_info_at_gc();
-    _collectorState = Resizing;
   }
   verify_work_stacks_empty();
   verify_overflow_empty();
@@ -6139,20 +5821,7 @@
   }
 }
 
-void ConcurrentMarkSweepGeneration::rotate_debug_collection_type() {
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print("Rotate from %d ", _debug_collection_type);
-  }
-  _debug_collection_type = (CollectionTypes) (_debug_collection_type + 1);
-  _debug_collection_type =
-    (CollectionTypes) (_debug_collection_type % Unknown_collection_type);
-  if (PrintGCDetails && Verbose) {
-    gclog_or_tty->print_cr("to %d ", _debug_collection_type);
-  }
-}
-
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen,
-  bool asynch) {
+void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* gen) {
   // We iterate over the space(s) underlying this generation,
   // checking the mark bit map to see if the bits corresponding
   // to specific blocks are marked or not. Blocks that are
@@ -6180,9 +5849,7 @@
 
   // check that we hold the requisite locks
   assert(have_cms_token(), "Should hold cms token");
-  assert(   (asynch && ConcurrentMarkSweepThread::cms_thread_has_cms_token())
-         || (!asynch && ConcurrentMarkSweepThread::vm_thread_has_cms_token()),
-        "Should possess CMS token to sweep");
+  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
   assert_lock_strong(gen->freelistLock());
   assert_lock_strong(bitMapLock());
 
@@ -6194,8 +5861,7 @@
   gen->setNearLargestChunk();
 
   {
-    SweepClosure sweepClosure(this, gen, &_markBitMap,
-                            CMSYield && asynch);
+    SweepClosure sweepClosure(this, gen, &_markBitMap, CMSYield);
     gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
     // We need to free-up/coalesce garbage/blocks from a
     // co-terminal free run. This is done in the SweepClosure
@@ -6213,8 +5879,8 @@
 
 // Reset CMS data structures (for now just the marking bit map)
 // preparatory for the next cycle.
-void CMSCollector::reset(bool asynch) {
-  if (asynch) {
+void CMSCollector::reset(bool concurrent) {
+  if (concurrent) {
     CMSTokenSyncWithLocks ts(true, bitMapLock());
 
     // If the state is not "Resetting", the foreground thread
@@ -6275,12 +5941,6 @@
     _collectorState = Idling;
   }
 
-  NOT_PRODUCT(
-    if (RotateCMSCollectionTypes) {
-      _cmsGen->rotate_debug_collection_type();
-    }
-  )
-
   register_gc_end();
 }
 
@@ -6293,7 +5953,7 @@
   switch (op) {
     case CMS_op_checkpointRootsInitial: {
       SvcGCMarker sgcm(SvcGCMarker::OTHER);
-      checkpointRootsInitial(true);       // asynch
+      checkpointRootsInitial();
       if (PrintGC) {
         _cmsGen->printOccupancy("initial-mark");
       }
@@ -6301,9 +5961,7 @@
     }
     case CMS_op_checkpointRootsFinal: {
       SvcGCMarker sgcm(SvcGCMarker::OTHER);
-      checkpointRootsFinal(true,    // asynch
-                           false,   // !clear_all_soft_refs
-                           false);  // !init_mark_was_synchronous
+      checkpointRootsFinal();
       if (PrintGC) {
         _cmsGen->printOccupancy("remark");
       }
@@ -7193,8 +6851,7 @@
                        CMSCollector* collector, MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack,
-                       bool should_yield):
+                       CMSMarkStack*  overflow_stack):
   _collector(collector),
   _whole_span(collector->_span),
   _span(span),
@@ -7202,7 +6859,6 @@
   _mut(&collector->_modUnionTable),
   _work_queue(work_queue),
   _overflow_stack(overflow_stack),
-  _yield(should_yield),
   _skip_bits(0),
   _task(task)
 {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -608,7 +608,6 @@
   GCHeapSummary _last_heap_summary;
   MetaspaceSummary _last_metaspace_summary;
 
-  void register_foreground_gc_start(GCCause::Cause cause);
   void register_gc_start(GCCause::Cause cause);
   void register_gc_end();
   void save_heap_summary();
@@ -695,8 +694,6 @@
   int    _numYields;
   size_t _numDirtyCards;
   size_t _sweep_count;
-  // Number of full gc's since the last concurrent gc.
-  uint   _full_gcs_since_conc_gc;
 
   // Occupancy used for bootstrapping stats
   double _bootstrap_occupancy;
@@ -760,14 +757,14 @@
   NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
 
   // CMS work methods
-  void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
+  void checkpointRootsInitialWork(); // Initial checkpoint work
 
   // A return value of false indicates failure due to stack overflow
-  bool markFromRootsWork(bool asynch);  // Concurrent marking work
+  bool markFromRootsWork();  // Concurrent marking work
 
  public:   // FIX ME!!! only for testing
-  bool do_marking_st(bool asynch);      // Single-threaded marking
-  bool do_marking_mt(bool asynch);      // Multi-threaded  marking
+  bool do_marking_st();      // Single-threaded marking
+  bool do_marking_mt();      // Multi-threaded  marking
 
  private:
 
@@ -788,20 +785,19 @@
   void reset_survivor_plab_arrays();
 
   // Final (second) checkpoint work
-  void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
-                                bool init_mark_was_synchronous);
+  void checkpointRootsFinalWork();
   // Work routine for parallel version of remark
   void do_remark_parallel();
   // Work routine for non-parallel version of remark
   void do_remark_non_parallel();
   // Reference processing work routine (during second checkpoint)
-  void refProcessingWork(bool asynch, bool clear_all_soft_refs);
+  void refProcessingWork();
 
   // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
+  void sweepWork(ConcurrentMarkSweepGeneration* gen);
 
   // (Concurrent) resetting of support data structures
-  void reset(bool asynch);
+  void reset(bool concurrent);
 
   // Clear _expansion_cause fields of constituent generations
   void clear_expansion_cause();
@@ -810,22 +806,10 @@
   // used regions of each generation to limit the extent of sweep
   void save_sweep_limits();
 
-  // A work method used by foreground collection to determine
-  // what type of collection (compacting or not, continuing or fresh)
-  // it should do.
-  void decide_foreground_collection_type(bool clear_all_soft_refs,
-    bool* should_compact, bool* should_start_over);
-
   // A work method used by the foreground collector to do
   // a mark-sweep-compact.
   void do_compaction_work(bool clear_all_soft_refs);
 
-  // A work method used by the foreground collector to do
-  // a mark-sweep, after taking over from a possibly on-going
-  // concurrent mark-sweep collection.
-  void do_mark_sweep_work(bool clear_all_soft_refs,
-    CollectorState first_state, bool should_start_over);
-
   // Work methods for reporting concurrent mode interruption or failure
   bool is_external_interruption();
   void report_concurrent_mode_interruption();
@@ -868,15 +852,13 @@
   // Locking checks
   NOT_PRODUCT(static bool have_cms_token();)
 
-  // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
   bool shouldConcurrentCollect();
 
   void collect(bool   full,
                bool   clear_all_soft_refs,
                size_t size,
                bool   tlab);
-  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
-  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
+  void collect_in_background(GCCause::Cause cause);
 
   // In support of ExplicitGCInvokesConcurrent
   static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
@@ -928,18 +910,16 @@
   void directAllocated(HeapWord* start, size_t size);
 
   // Main CMS steps and related support
-  void checkpointRootsInitial(bool asynch);
-  bool markFromRoots(bool asynch);  // a return value of false indicates failure
-                                    // due to stack overflow
+  void checkpointRootsInitial();
+  bool markFromRoots();  // a return value of false indicates failure
+                         // due to stack overflow
   void preclean();
-  void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
-                            bool init_mark_was_synchronous);
-  void sweep(bool asynch);
+  void checkpointRootsFinal();
+  void sweep();
 
   // Check that the currently executing thread is the expected
   // one (foreground collector or background collector).
   static void check_correct_thread_executing() PRODUCT_RETURN;
-  // XXXPERM void print_statistics()           PRODUCT_RETURN;
 
   bool is_cms_reachable(HeapWord* addr);
 
@@ -1060,15 +1040,6 @@
   // In support of MinChunkSize being larger than min object size
   const double _dilatation_factor;
 
-  enum CollectionTypes {
-    Concurrent_collection_type          = 0,
-    MS_foreground_collection_type       = 1,
-    MSC_foreground_collection_type      = 2,
-    Unknown_collection_type             = 3
-  };
-
-  CollectionTypes _debug_collection_type;
-
   // True if a compacting collection was done.
   bool _did_compact;
   bool did_compact() { return _did_compact; }
@@ -1152,7 +1123,7 @@
   // hack to allow the collection of the younger gen first if the flag is
   // set.
   virtual bool full_collects_younger_generations() const {
-    return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
+    return !ScavengeBeforeFullGC;
   }
 
   void space_iterate(SpaceClosure* blk, bool usedOnly = false);
@@ -1295,9 +1266,6 @@
   // Resize the generation after a non-compacting
   // collection.
   void compute_new_size_free_list();
-
-  CollectionTypes debug_collection_type() { return _debug_collection_type; }
-  void rotate_debug_collection_type();
 };
 
 //
@@ -1344,7 +1312,6 @@
   CMSBitMap*     _mut;
   OopTaskQueue*  _work_queue;
   CMSMarkStack*  _overflow_stack;
-  bool           _yield;
   int            _skip_bits;
   HeapWord*      _finger;
   HeapWord*      _threshold;
@@ -1354,8 +1321,7 @@
                        MemRegion span,
                        CMSBitMap* bit_map,
                        OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack,
-                       bool should_yield);
+                       CMSMarkStack*  overflow_stack);
   bool do_bit(size_t offset);
   inline void do_yield_check();
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -398,8 +398,7 @@
 
 inline void Par_MarkFromRootsClosure::do_yield_check() {
   if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
+      !_collector->foregroundGCIsActive()) {
     do_yield_work();
   }
 }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -134,7 +134,7 @@
     if (_should_terminate) break;
     GCCause::Cause cause = _collector->_full_gc_requested ?
       _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
-    _collector->collect_in_background(false, cause);
+    _collector->collect_in_background(cause);
   }
   assert(_should_terminate, "just checking");
   // Check that the state of any protocol for synchronization
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -42,8 +42,12 @@
 void VM_CMS_Operation::acquire_pending_list_lock() {
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkSweepThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }
 
 void VM_CMS_Operation::release_and_notify_pending_list_lock() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -1888,7 +1888,7 @@
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
   // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(reserved_region(), 2);
+  _rem_set = collector_policy()->create_rem_set(reserved_region());
   set_barrier_set(rem_set()->bs());
   if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
     vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
@@ -4270,10 +4270,11 @@
 
   if (state == G1CollectedHeap::InCSet) {
     oop forwardee;
-    if (obj->is_forwarded()) {
-      forwardee = obj->forwardee();
+    markOop m = obj->mark();
+    if (m->is_marked()) {
+      forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(obj);
+      forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
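
This hunk (and its twin in g1ParScanThreadState.inline.hpp below) loads the
mark word once and derives the forwardee from that single value, instead of
calling obj->is_forwarded() and then obj->forwardee(), each of which re-reads
the mark; the loaded mark is then handed straight to copy_to_survivor_space.
A sketch of the load-once shape, with an invented encoding (low bit set means
"forwarded") that is not the real markOop layout:

    #include <cstdint>

    typedef uintptr_t mark_t;

    inline bool  is_marked(mark_t m)      { return (m & 1) != 0; }
    inline void* decode_pointer(mark_t m) { return (void*)(m & ~mark_t(1)); }

    // Stub standing in for the real copy path, which now receives the
    // already-loaded mark as a parameter.
    void* copy_to_survivor(void* obj, mark_t old_mark) {
      (void)old_mark;
      return obj;
    }

    void* forwardee_of(void* obj, volatile mark_t* mark_addr) {
      mark_t m = *mark_addr;           // single load of the mark word
      if (is_marked(m)) {
        return decode_pointer(m);      // already copied: decode what we hold
      }
      return copy_to_survivor(obj, m); // pass the mark; no re-read needed
    }
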
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -150,7 +150,8 @@
   } while (!_refs->is_empty());
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
+oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
+                                                 markOop const old_mark) {
   size_t word_sz = old->size();
   HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
@@ -158,9 +159,8 @@
   assert( (from_region->is_young() && young_index >  0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
   G1CollectorPolicy* g1p = _g1h->g1_policy();
-  markOop m = old->mark();
-  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
-                                           : m->age();
+  uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
+                                                   : old_mark->age();
   GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                              word_sz);
   AllocationContext_t context = from_region->allocation_context();
@@ -196,30 +196,22 @@
     alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
 
     if (g1p->track_object_age(alloc_purpose)) {
-      // We could simply do obj->incr_age(). However, this causes a
-      // performance issue. obj->incr_age() will first check whether
-      // the object has a displaced mark by checking its mark word;
-      // getting the mark word from the new location of the object
-      // stalls. So, given that we already have the mark word and we
-      // are about to install it anyway, it's better to increase the
-      // age on the mark word, when the object does not have a
-      // displaced mark word. We're not expecting many objects to have
-      // a displaced marked word, so that case is not optimized
-      // further (it could be...) and we simply call obj->incr_age().
-
-      if (m->has_displaced_mark_helper()) {
-        // in this case, we have to install the mark word first,
+      if (age < markOopDesc::max_age) {
+        age++;
+      }
+      if (old_mark->has_displaced_mark_helper()) {
+        // In this case, we have to install the mark word first,
         // otherwise obj looks to be forwarded (the old mark word,
         // which contains the forward pointer, was copied)
-        obj->set_mark(m);
-        obj->incr_age();
+        obj->set_mark(old_mark);
+        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
+        old_mark->set_displaced_mark_helper(new_mark);
       } else {
-        m = m->incr_age();
-        obj->set_mark(m);
+        obj->set_mark(old_mark->set_age(age));
       }
-      age_table()->add(obj, word_sz);
+      age_table()->add(age, word_sz);
     } else {
-      obj->set_mark(m);
+      obj->set_mark(old_mark);
     }
 
     if (G1StringDedup::is_enabled()) {
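
The rewritten block bumps the tenuring age on the mark value already in hand,
saturating at markOopDesc::max_age, rather than calling obj->incr_age(); as
the deleted comment explained, incr_age() would re-read the mark from the
freshly copied object and stall. The displaced-header case installs the old
mark first and ages the displaced header instead. A sketch of the saturating
bump under an invented layout (low 4 bits hold the age; not the real markOop
encoding):

    #include <cstdint>

    typedef uintptr_t mark_t;
    const unsigned MAX_AGE = 15;             // assume 4 age bits

    inline unsigned age_of(mark_t m)               { return m & 0xF; }
    inline mark_t   with_age(mark_t m, unsigned a) { return (m & ~mark_t(0xF)) | a; }

    // Mark to install on the survivor copy: increment the age we already
    // extracted from the old mark, without touching the new object.
    mark_t aged_mark(mark_t old_mark) {
      unsigned age = age_of(old_mark);
      if (age < MAX_AGE) {
        age++;
      }
      return with_age(old_mark, age);
    }
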
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -195,7 +195,7 @@
   inline void dispatch_reference(StarTask ref);
  public:
 
-  oop copy_to_survivor_space(oop const obj);
+  oop copy_to_survivor_space(oop const obj, markOop const old_mark);
 
   void trim_queue();
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -41,10 +41,11 @@
   G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
   if (in_cset_state == G1CollectedHeap::InCSet) {
     oop forwardee;
-    if (obj->is_forwarded()) {
-      forwardee = obj->forwardee();
+    markOop m = obj->mark();
+    if (m->is_marked()) {
+      forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = copy_to_survivor_space(obj);
+      forwardee = copy_to_survivor_space(obj, m);
     }
     oopDesc::encode_store_heap_oop(p, forwardee);
   } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -32,9 +32,8 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "runtime/thread.inline.hpp"
 
-G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap,
-                                                 int max_covered_regions) :
-    CardTableModRefBSForCTRS(whole_heap, max_covered_regions)
+G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) :
+    CardTableModRefBSForCTRS(whole_heap)
 {
   _kind = G1SATBCT;
 }
@@ -132,9 +131,8 @@
 }
 
 G1SATBCardTableLoggingModRefBS::
-G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
-                               int max_covered_regions) :
-  G1SATBCardTableModRefBS(whole_heap, max_covered_regions),
+G1SATBCardTableLoggingModRefBS(MemRegion whole_heap) :
+  G1SATBCardTableModRefBS(whole_heap),
   _dcqs(JavaThread::dirty_card_queue_set()),
   _listener()
 {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -50,8 +50,7 @@
   // pre-marking object graph.
   static void enqueue(oop pre_val);
 
-  G1SATBCardTableModRefBS(MemRegion whole_heap,
-                          int max_covered_regions);
+  G1SATBCardTableModRefBS(MemRegion whole_heap);
 
   bool is_a(BarrierSet::Name bsn) {
     return bsn == BarrierSet::G1SATBCT || CardTableModRefBS::is_a(bsn);
@@ -152,8 +151,7 @@
     return ReservedSpace::allocation_align_size_up(number_of_slots);
   }
 
-  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
-                                 int max_covered_regions);
+  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap);
 
   virtual void initialize() { }
   virtual void initialize(G1RegionToSpaceMapper* mapper);
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -1004,10 +1004,13 @@
 HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   assert( _gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant" );
-  if (_gc_time_stamp < g1h->get_gc_time_stamp())
-    return top();
-  else
+  HeapWord* local_top = top();
+  OrderAccess::loadload();
+  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
+    return local_top;
+  } else {
     return Space::saved_mark_word();
+  }
 }
 
 void G1OffsetTableContigSpace::record_top_and_timestamp() {
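
saved_mark_word() now snapshots top() into a local and issues an
OrderAccess::loadload() before reading the timestamp, so the two loads cannot
be reordered against each other by the compiler or CPU, and a reader can no
longer pair a fresh timestamp with a stale top. The same discipline sketched
with portable C++ fences (HotSpot uses its own OrderAccess layer; the writer
ordering shown is one plausible counterpart, and all names are illustrative):

    #include <atomic>
    #include <cstdint>

    std::atomic<uintptr_t> g_top{0};
    std::atomic<unsigned>  g_gc_time_stamp{0};

    // Writer: publish the new top before the timestamp that tells
    // readers the field pair is current.
    void record_top_and_timestamp(uintptr_t t, unsigned stamp) {
      g_top.store(t, std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_release);  // ~storestore
      g_gc_time_stamp.store(stamp, std::memory_order_relaxed);
    }

    // Reader: copy top into a local, fence, then read the stamp;
    // this mirrors local_top + OrderAccess::loadload() in the hunk.
    uintptr_t saved_mark_word(unsigned global_stamp, uintptr_t saved) {
      uintptr_t local_top = g_top.load(std::memory_order_relaxed);
      std::atomic_thread_fence(std::memory_order_acquire);  // ~loadload
      unsigned stamp = g_gc_time_stamp.load(std::memory_order_relaxed);
      return (stamp < global_stamp) ? local_top : saved;
    }
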
--- a/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -213,8 +213,12 @@
   assert(_needs_pll, "don't call this otherwise");
   // The caller may block while communicating
   // with the SLT thread in order to acquire/release the PLL.
-  ConcurrentMarkThread::slt()->
-    manipulatePLL(SurrogateLockerThread::acquirePLL);
+  SurrogateLockerThread* slt = ConcurrentMarkThread::slt();
+  if (slt != NULL) {
+    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
+  } else {
+    SurrogateLockerThread::report_missing_slt();
+  }
 }
 
 void VM_CGC_Operation::release_and_notify_pending_list_lock() {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -53,8 +53,8 @@
     verify_card       = CardTableModRefBS::CT_MR_BS_last_reserved + 5
   };
 
-  CardTableExtension(MemRegion whole_heap, int max_covered_regions) :
-    CardTableModRefBS(whole_heap, max_covered_regions) { }
+  CardTableExtension(MemRegion whole_heap) :
+    CardTableModRefBS(whole_heap) { }
 
   // Too risky for the 4/10/02 putback
   // BarrierSet::Name kind() { return BarrierSet::CardTableExtension; }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -76,7 +76,7 @@
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region(), 3);
+  CardTableExtension* const barrier_set = new CardTableExtension(reserved_region());
   barrier_set->initialize();
   _barrier_set = barrier_set;
   oopDesc::set_bs(_barrier_set);
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -55,7 +55,10 @@
 
   // add entry
   void add(oop p, size_t oop_size) {
-    uint age = p->age();
+    add(p->age(), oop_size);
+  }
+
+  void add(uint age, size_t oop_size) {
     assert(age > 0 && age < table_size, "invalid age of object");
     sizes[age] += oop_size;
   }
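
The new add(uint, size_t) overload lets a caller that already extracted the
age from a saved mark word (as copy_to_survivor_space above now does) record
it without touching the object again; the oop flavor simply delegates. A
compressed stand-in for the pair (table_size mirrors the header's assert;
the value 16 assumes max_age + 1 and is otherwise invented):

    #include <cassert>
    #include <cstddef>

    struct AgeTableSketch {
      static const unsigned table_size = 16;
      size_t sizes[table_size] = {};

      // Primitive-overload entry point: no object access required.
      void add(unsigned age, size_t oop_size) {
        assert(age > 0 && age < table_size && "invalid age of object");
        sizes[age] += oop_size;
      }
    };
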
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -138,6 +138,13 @@
   return res;
 }
 
+void SurrogateLockerThread::report_missing_slt() {
+  vm_exit_during_initialization(
+    "GC before GC support fully initialized: "
+    "SLT is needed but has not yet been created.");
+  ShouldNotReachHere();
+}
+
 void SurrogateLockerThread::manipulatePLL(SLT_msg_type msg) {
   MutexLockerEx x(&_monitor, Mutex::_no_safepoint_check_flag);
   assert(_buffer == empty, "Should be empty");
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -93,6 +93,9 @@
  public:
   static SurrogateLockerThread* make(TRAPS);
 
+  // Terminate the VM with an error message: the SLT is needed but has not yet been created.
+  static void report_missing_slt();
+
   SurrogateLockerThread();
 
   bool is_hidden_from_external_view() const     { return true; }
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -33,8 +33,8 @@
 #include "memory/referenceProcessorStats.hpp"
 #include "runtime/os.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ticks.inline.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #endif
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -33,12 +33,11 @@
 #include "memory/allocation.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/referenceType.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/ticks.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #endif
-#include "utilities/macros.hpp"
-#include "utilities/ticks.hpp"
-
 
 class EvacuationInfo;
 class GCHeapSummary;
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -31,6 +31,7 @@
 #include "runtime/os.hpp"
 #include "trace/tracing.hpp"
 #include "trace/traceBackend.hpp"
+#include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/evacuationInfo.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
--- a/hotspot/src/share/vm/memory/allocation.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/allocation.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -50,8 +50,7 @@
                                  size_t word_size, bool read_only,
                                  MetaspaceObj::Type type, TRAPS) throw() {
   // Klass has it's own operator new
-  return Metaspace::allocate(loader_data, word_size, read_only,
-                             type, CHECK_NULL);
+  return Metaspace::allocate(loader_data, word_size, read_only, type, THREAD);
 }
 
 bool MetaspaceObj::is_shared() const {
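
This is the first of several hunks in this change (also oopFactory.hpp,
constantPool, instanceKlass, klass, methodData, objArrayKlass, typeArrayKlass
below) replacing CHECK_NULL with THREAD when the TRAPS call sits in tail
position: the CHECK_ macros append a pending-exception test after the call,
and that test is unreachable when the call's result is immediately returned.
A simplified model of the convention (the real macros live in
utilities/exceptions.hpp; this captures only their shape):

    #include <cstddef>

    struct Thread { bool pending_exception; };

    #define TRAPS      Thread* the_thread   // real code spells this THREAD
    #define THREAD     the_thread
    #define CHECK_NULL THREAD); if (THREAD->pending_exception) return NULL; (void)(0

    void* inner(TRAPS) { THREAD->pending_exception = true; return NULL; }

    void* outer_checked(TRAPS) {
      // Expands to: return inner(the_thread); if (...) return NULL; ...
      // so the generated exception test is dead code.
      return inner(CHECK_NULL);
    }

    void* outer_tail(TRAPS) {
      return inner(THREAD);   // tail position: the caller re-checks anyway
    }
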
--- a/hotspot/src/share/vm/memory/barrierSet.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/barrierSet.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -49,7 +49,12 @@
     TargetUninitialized = 1
   };
 protected:
-  int _max_covered_regions;
+  // Some barrier sets create tables whose elements correspond to parts of
+  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
+  // normally reserve space for such tables, and commit parts of the table
+  // "covering" parts of the heap that are committed. At most one covered
+  // region per generation is needed.
+  static const int _max_covered_regions = 2;
   Name _kind;
 
 public:
@@ -159,18 +164,6 @@
 protected:
   virtual void write_region_work(MemRegion mr) = 0;
 public:
-
-  // Some barrier sets create tables whose elements correspond to parts of
-  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
-  // normally reserve space for such tables, and commit parts of the table
-  // "covering" parts of the heap that are committed.  The constructor is
-  // passed the maximum number of independently committable subregions to
-  // be covered, and the "resize_covered_region" function allows the
-  // sub-parts of the heap to inform the barrier set of changes of their
-  // sizes.
-  BarrierSet(int max_covered_regions) :
-    _max_covered_regions(max_covered_regions) {}
-
   // Inform the BarrierSet that the covered heap region that starts
   // with "base" has been changed to have the given size (possibly from 0,
   // for initialization.)
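
Since every heap configuration passed the same bound, the field becomes a
compile-time constant on the base class, and the hunks below delete the
parameter from ModRefBarrierSet, CardTableModRefBS, CardTableExtension, the
G1 barrier sets, and CardTableRS. The shape of the simplification, with
invented names:

    // Before: the bound rode along every constructor in the hierarchy.
    // After: a static const on the base; subclasses forward nothing.
    struct BarrierSetSketch {
      static const int _max_covered_regions = 2;  // one per generation
    };
    struct ModRefSketch : BarrierSetSketch {
      ModRefSketch() {}                           // no value to thread through
    };
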
--- a/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/binaryTreeDictionary.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -23,8 +23,8 @@
  */
 
 #include "precompiled.hpp"
-#include "utilities/macros.hpp"
 #include "gc_implementation/shared/allocationStats.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "memory/binaryTreeDictionary.hpp"
 #include "memory/freeList.hpp"
 #include "memory/freeBlockDictionary.hpp"
@@ -32,7 +32,6 @@
 #include "runtime/globals.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
-#include "gc_implementation/shared/spaceDecorator.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/adaptiveFreeList.hpp"
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -53,9 +53,8 @@
   return align_size_up(_guard_index + 1, MAX2(_page_size, granularity));
 }
 
-CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
-                                     int max_covered_regions):
-  ModRefBarrierSet(max_covered_regions),
+CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap) :
+  ModRefBarrierSet(),
   _whole_heap(whole_heap),
   _guard_index(0),
   _guard_region(),
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -284,7 +284,7 @@
     return bsn == BarrierSet::CardTableModRef || ModRefBarrierSet::is_a(bsn);
   }
 
-  CardTableModRefBS(MemRegion whole_heap, int max_covered_regions);
+  CardTableModRefBS(MemRegion whole_heap);
   ~CardTableModRefBS();
 
   virtual void initialize();
@@ -482,9 +482,8 @@
   bool card_will_be_scanned(jbyte cv);
   bool card_may_have_been_dirty(jbyte cv);
 public:
-  CardTableModRefBSForCTRS(MemRegion whole_heap,
-                           int max_covered_regions) :
-    CardTableModRefBS(whole_heap, max_covered_regions) {}
+  CardTableModRefBSForCTRS(MemRegion whole_heap) :
+    CardTableModRefBS(whole_heap) {}
 
   void set_CTRS(CardTableRS* rs) { _rs = rs; }
 };
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -38,21 +38,18 @@
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #endif // INCLUDE_ALL_GCS
 
-CardTableRS::CardTableRS(MemRegion whole_heap,
-                         int max_covered_regions) :
+CardTableRS::CardTableRS(MemRegion whole_heap) :
   GenRemSet(),
-  _cur_youngergen_card_val(youngergenP1_card),
-  _regions_to_iterate(max_covered_regions - 1)
+  _cur_youngergen_card_val(youngergenP1_card)
 {
 #if INCLUDE_ALL_GCS
   if (UseG1GC) {
-      _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap,
-                                                  max_covered_regions);
+      _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap);
   } else {
-    _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
+    _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
   }
 #else
-  _ct_bs = new CardTableModRefBSForCTRS(whole_heap, max_covered_regions);
+  _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
 #endif
   _ct_bs->initialize();
   set_bs(_ct_bs);
--- a/hotspot/src/share/vm/memory/cardTableRS.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/cardTableRS.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -83,7 +83,8 @@
 
   jbyte _cur_youngergen_card_val;
 
-  int _regions_to_iterate;
+  // Number of generations, plus one for lingering PermGen issues in CardTableRS.
+  static const int _regions_to_iterate = 3;
 
   jbyte cur_youngergen_card_val() {
     return _cur_youngergen_card_val;
@@ -101,7 +102,7 @@
   jbyte find_unused_youngergenP_card_value();
 
 public:
-  CardTableRS(MemRegion whole_heap, int max_covered_regions);
+  CardTableRS(MemRegion whole_heap);
   ~CardTableRS();
 
   // *** GenRemSet functions.
--- a/hotspot/src/share/vm/memory/collectorPolicy.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/collectorPolicy.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -152,9 +152,8 @@
   return result;
 }
 
-GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
-                                           int max_covered_regions) {
-  return new CardTableRS(whole_heap, max_covered_regions);
+GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap) {
+  return new CardTableRS(whole_heap);
 }
 
 void CollectorPolicy::cleared_all_soft_refs() {
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -152,10 +152,7 @@
 
   virtual BarrierSet::Name barrier_set_name() = 0;
 
-  // Create the remembered set (to cover the given reserved region,
-  // allowing breaking up into at most "max_covered_regions").
-  virtual GenRemSet* create_rem_set(MemRegion reserved,
-                                    int max_covered_regions);
+  virtual GenRemSet* create_rem_set(MemRegion reserved);
 
   // This method controls how a collector satisfies a request
   // for a block of memory.  "gc_time_limit_was_exceeded" will
--- a/hotspot/src/share/vm/memory/freeBlockDictionary.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/freeBlockDictionary.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -23,14 +23,13 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/freeBlockDictionary.hpp"
+#include "memory/metachunk.hpp"
+#include "runtime/thread.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
-#include "memory/freeBlockDictionary.hpp"
-#include "memory/metachunk.hpp"
-#include "runtime/thread.inline.hpp"
-#include "utilities/macros.hpp"
 
 #ifndef PRODUCT
 template <class Chunk> Mutex* FreeBlockDictionary<Chunk>::par_lock() const {
--- a/hotspot/src/share/vm/memory/freeList.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/freeList.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -31,7 +31,6 @@
 #include "runtime/mutex.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/macros.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
 #endif // INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -109,13 +109,11 @@
 
   char* heap_address;
   size_t total_reserved = 0;
-  int n_covered_regions = 0;
   ReservedSpace heap_rs;
 
   size_t heap_alignment = collector_policy()->heap_alignment();
 
-  heap_address = allocate(heap_alignment, &total_reserved,
-                          &n_covered_regions, &heap_rs);
+  heap_address = allocate(heap_alignment, &total_reserved, &heap_rs);
 
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
@@ -125,7 +123,7 @@
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  _rem_set = collector_policy()->create_rem_set(reserved_region(), n_covered_regions);
+  _rem_set = collector_policy()->create_rem_set(reserved_region());
   set_barrier_set(rem_set()->bs());
 
   _gch = this;
@@ -152,14 +150,12 @@
 
 char* GenCollectedHeap::allocate(size_t alignment,
                                  size_t* _total_reserved,
-                                 int* _n_covered_regions,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
     "the maximum representable size";
 
   // Now figure out the total size.
   size_t total_reserved = 0;
-  int n_covered_regions = 0;
   const size_t pageSize = UseLargePages ?
       os::large_page_size() : os::vm_page_size();
 
@@ -170,18 +166,12 @@
     if (total_reserved < _gen_specs[i]->max_size()) {
       vm_exit_during_initialization(overflow_msg);
     }
-    n_covered_regions += _gen_specs[i]->n_covered_regions();
   }
   assert(total_reserved % alignment == 0,
          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                  SIZE_FORMAT, total_reserved, alignment));
 
-  // Needed until the cardtable is fixed to have the right number
-  // of covered regions.
-  n_covered_regions += 2;
-
   *_total_reserved = total_reserved;
-  *_n_covered_regions = n_covered_regions;
 
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -121,9 +121,7 @@
 
   // Returns JNI_OK on success
   virtual jint initialize();
-  char* allocate(size_t alignment,
-                 size_t* _total_reserved, int* _n_covered_regions,
-                 ReservedSpace* heap_rs);
+  char* allocate(size_t alignment, size_t* _total_reserved, ReservedSpace* heap_rs);
 
   // Does operations required after initialization has been done.
   void post_initialize();
--- a/hotspot/src/share/vm/memory/generationSpec.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/generationSpec.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -59,10 +59,6 @@
     set_init_size(align_size_up(init_size(), alignment));
     set_max_size(align_size_up(max_size(), alignment));
   }
-
-  // Return the number of regions contained in the generation which
-  // might need to be independently covered by a remembered set.
-  virtual int n_covered_regions() const { return 1; }
 };
 
 typedef GenerationSpec* GenerationSpecPtr;
--- a/hotspot/src/share/vm/memory/heapInspection.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/heapInspection.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -367,7 +367,7 @@
       _csv_format(csv_format), _print_help(print_help),
       _print_class_stats(print_class_stats), _columns(columns) {}
   void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
-  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN;
+  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN_(0);
   static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
  private:
   void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
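
populate_table() returns size_t, so the stub used when services are excluded
must supply a value: NOT_SERVICES_RETURN expands to an empty body, which is
only well-formed for void functions (metaspaceShared.hpp below gets the same
fix via NOT_CDS_RETURN_(0)). A simplified model of the macro pair; the real
definitions live in utilities/macros.hpp:

    #include <cstddef>

    #define INCLUDE_SERVICES 0                 // pretend services are excluded
    #if INCLUDE_SERVICES
    #define NOT_SERVICES_RETURN                /* declaration only */
    #define NOT_SERVICES_RETURN_(v)            /* declaration only */
    #else
    #define NOT_SERVICES_RETURN      {}
    #define NOT_SERVICES_RETURN_(v)  { return v; }
    #endif

    struct HeapInspectionSketch {
      void   heap_inspection() NOT_SERVICES_RETURN;      // void: empty stub ok
      size_t populate_table()  NOT_SERVICES_RETURN_(0);  // non-void: needs 0
    };
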
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -47,6 +47,7 @@
 #include "services/memoryService.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/macros.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
@@ -1411,7 +1412,7 @@
 
 size_t MetaspaceGC::capacity_until_GC() {
   size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
-  assert(value >= MetaspaceSize, "Not initialied properly?");
+  assert(value >= MetaspaceSize, "Not initialized properly?");
   return value;
 }
 
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -92,7 +92,7 @@
   static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
   static int preload_and_dump(const char * class_list_path,
                               GrowableArray<Klass*>* class_promote_order,
-                              TRAPS) NOT_CDS_RETURN;
+                              TRAPS) NOT_CDS_RETURN_(0);
 
   static ReservedSpace* shared_rs() {
     CDS_ONLY(return _shared_rs);
--- a/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/modRefBarrierSet.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -95,10 +95,6 @@
   // The caller guarantees that "mr" contains no references.  (Perhaps it's
   // objects have been moved elsewhere.)
   virtual void clear(MemRegion mr) = 0;
-
-  // Pass along the argument to the superclass.
-  ModRefBarrierSet(int max_covered_regions) :
-    BarrierSet(max_covered_regions) {}
 };
 
 #endif // SHARE_VM_MEMORY_MODREFBARRIERSET_HPP
--- a/hotspot/src/share/vm/memory/oopFactory.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/oopFactory.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -41,20 +41,20 @@
 class oopFactory: AllStatic {
  public:
   // Basic type leaf array allocation
-  static typeArrayOop    new_boolArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::boolArrayKlassObj  ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_charArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::charArrayKlassObj  ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_singleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::singleArrayKlassObj())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_doubleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::doubleArrayKlassObj())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_byteArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::byteArrayKlassObj  ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_shortArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::shortArrayKlassObj ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_intArray   (int length, TRAPS) { return TypeArrayKlass::cast(Universe::intArrayKlassObj   ())->allocate(length, CHECK_NULL); }
-  static typeArrayOop    new_longArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::longArrayKlassObj  ())->allocate(length, CHECK_NULL); }
+  static typeArrayOop    new_boolArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::boolArrayKlassObj  ())->allocate(length, THREAD); }
+  static typeArrayOop    new_charArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::charArrayKlassObj  ())->allocate(length, THREAD); }
+  static typeArrayOop    new_singleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::singleArrayKlassObj())->allocate(length, THREAD); }
+  static typeArrayOop    new_doubleArray(int length, TRAPS) { return TypeArrayKlass::cast(Universe::doubleArrayKlassObj())->allocate(length, THREAD); }
+  static typeArrayOop    new_byteArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::byteArrayKlassObj  ())->allocate(length, THREAD); }
+  static typeArrayOop    new_shortArray (int length, TRAPS) { return TypeArrayKlass::cast(Universe::shortArrayKlassObj ())->allocate(length, THREAD); }
+  static typeArrayOop    new_intArray   (int length, TRAPS) { return TypeArrayKlass::cast(Universe::intArrayKlassObj   ())->allocate(length, THREAD); }
+  static typeArrayOop    new_longArray  (int length, TRAPS) { return TypeArrayKlass::cast(Universe::longArrayKlassObj  ())->allocate(length, THREAD); }
 
   // create java.lang.Object[]
   static objArrayOop     new_objectArray(int length, TRAPS)  {
     assert(Universe::objectArrayKlassObj() != NULL, "Too early?");
     return ObjArrayKlass::
-      cast(Universe::objectArrayKlassObj())->allocate(length, CHECK_NULL);
+      cast(Universe::objectArrayKlassObj())->allocate(length, THREAD);
   }
 
   static typeArrayOop    new_charArray           (const char* utf8_str,  TRAPS);
--- a/hotspot/src/share/vm/memory/universe.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/memory/universe.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -26,9 +26,6 @@
 #include "classfile/classLoader.hpp"
 #include "classfile/classLoaderData.hpp"
 #include "classfile/javaClasses.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedClassUtil.hpp"
-#endif
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -82,6 +79,9 @@
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #endif // INCLUDE_ALL_GCS
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/hotspot/src/share/vm/oops/constantPool.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/constantPool.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -461,7 +461,7 @@
 
 
 Klass* ConstantPool::klass_ref_at(int which, TRAPS) {
-  return klass_at(klass_ref_index_at(which), CHECK_NULL);
+  return klass_at(klass_ref_index_at(which), THREAD);
 }
 
 
--- a/hotspot/src/share/vm/oops/constantPool.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/constantPool.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -336,13 +336,13 @@
 
   Klass* klass_at(int which, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
-    return klass_at_impl(h_this, which, true, CHECK_NULL);
+    return klass_at_impl(h_this, which, true, THREAD);
   }
 
   // Version of klass_at that doesn't save the resolution error, called during deopt
   Klass* klass_at_ignore_error(int which, TRAPS) {
     constantPoolHandle h_this(THREAD, this);
-    return klass_at_impl(h_this, which, false, CHECK_NULL);
+    return klass_at_impl(h_this, which, false, THREAD);
   }
 
   Symbol* klass_name_at(int which);  // Returns the name, w/o resolving.
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -532,7 +532,7 @@
   // 1) Verify the bytecodes
   Verifier::Mode mode =
     throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
-  return Verifier::verify(this_k, mode, this_k->should_verify_class(), CHECK_false);
+  return Verifier::verify(this_k, mode, this_k->should_verify_class(), THREAD);
 }
 
 
@@ -1130,7 +1130,7 @@
   if (or_null) {
     return oak->array_klass_or_null(n);
   }
-  return oak->array_klass(n, CHECK_NULL);
+  return oak->array_klass(n, THREAD);
 }
 
 Klass* InstanceKlass::array_klass_impl(bool or_null, TRAPS) {
--- a/hotspot/src/share/vm/oops/klass.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/klass.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -152,7 +152,7 @@
 
 void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
   return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
-                             MetaspaceObj::ClassType, CHECK_NULL);
+                             MetaspaceObj::ClassType, THREAD);
 }
 
 Klass::Klass() {
--- a/hotspot/src/share/vm/oops/methodData.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -658,7 +658,7 @@
   int size = MethodData::compute_allocation_size_in_words(method);
 
   return new (loader_data, size, false, MetaspaceObj::MethodDataType, THREAD)
-    MethodData(method(), size, CHECK_NULL);
+    MethodData(method(), size, THREAD);
 }
 
 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -189,7 +189,7 @@
     if (length <= arrayOopDesc::max_array_length(T_OBJECT)) {
       int size = objArrayOopDesc::object_size(length);
       KlassHandle h_k(THREAD, this);
-      return (objArrayOop)CollectedHeap::array_allocate(h_k, size, length, CHECK_NULL);
+      return (objArrayOop)CollectedHeap::array_allocate(h_k, size, length, THREAD);
     } else {
       report_java_out_of_memory("Requested array size exceeds VM limit");
       JvmtiExport::post_array_size_exhausted();
@@ -362,11 +362,11 @@
   if (or_null) {
     return ak->array_klass_or_null(n);
   }
-  return ak->array_klass(n, CHECK_NULL);
+  return ak->array_klass(n, THREAD);
 }
 
 Klass* ObjArrayKlass::array_klass_impl(bool or_null, TRAPS) {
-  return array_klass_impl(or_null, dimension() +  1, CHECK_NULL);
+  return array_klass_impl(or_null, dimension() +  1, THREAD);
 }
 
 bool ObjArrayKlass::can_be_primary_super_slow() const {
--- a/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/oop.pcgc.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
 #define SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
 
+#include "runtime/atomic.inline.hpp"
 #include "utilities/macros.hpp"
-#include "runtime/atomic.inline.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/parNew/parNewGeneration.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
--- a/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -191,7 +191,7 @@
   if (or_null) {
     return h_ak->array_klass_or_null(n);
   }
-  return h_ak->array_klass(n, CHECK_NULL);
+  return h_ak->array_klass(n, THREAD);
 }
 
 Klass* TypeArrayKlass::array_klass_impl(bool or_null, TRAPS) {
--- a/hotspot/src/share/vm/prims/jni.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/prims/jni.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -32,10 +32,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "interpreter/linkResolver.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
 #include "memory/allocation.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/gcLocker.inline.hpp"
@@ -81,6 +77,10 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/histogram.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif // INCLUDE_ALL_GCS
 
 static jint CurrentVersion = JNI_VERSION_1_8;
 
@@ -603,6 +603,7 @@
 
   THROW_OOP_(JNIHandles::resolve(obj), JNI_OK);
   ShouldNotReachHere();
+  return 0;  // Mute compiler.
 JNI_END
 
 
@@ -623,6 +624,7 @@
   Handle protection_domain (THREAD, k->protection_domain());
   THROW_MSG_LOADER_(name, (char *)message, class_loader, protection_domain, JNI_OK);
   ShouldNotReachHere();
+  return 0;  // Mute compiler.
 JNI_END
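
The two "Mute compiler." returns above address a standard complaint from compilers that warn when control can fall off the end of a value-returning function (GCC's -Wreturn-type): ShouldNotReachHere() is not visible to the compiler as noreturn, so a path without a return value still appears to exist. A small illustration with a stand-in helper (names are illustrative only):

    #include <cstdlib>

    static void should_not_reach_here() { std::abort(); }  // not declared noreturn

    int f() {
      should_not_reach_here();
      return 0;  // quiets -Wreturn-type; unreachable at runtime
    }
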
 
 
--- a/hotspot/src/share/vm/prims/jvm.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -28,10 +28,6 @@
 #include "classfile/javaClasses.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/systemDictionary.hpp"
-#if INCLUDE_CDS
-#include "classfile/sharedClassUtil.hpp"
-#include "classfile/systemDictionaryShared.hpp"
-#endif
 #include "classfile/vmSymbols.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/bytecode.hpp"
@@ -73,8 +69,13 @@
 #include "utilities/dtrace.hpp"
 #include "utilities/events.hpp"
 #include "utilities/histogram.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/top.hpp"
 #include "utilities/utf8.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
 #ifdef TARGET_OS_FAMILY_linux
 # include "jvm_linux.h"
 #endif
@@ -3552,7 +3553,7 @@
     JVM_DTraceProvider* providers))
   JVMWrapper("JVM_DTraceActivate");
   return DTraceJSDT::activate(
-    version, module_name, providers_count, providers, CHECK_0);
+    version, module_name, providers_count, providers, THREAD);
 JVM_END
 
 JVM_ENTRY(jboolean,JVM_DTraceIsProbeEnabled(JNIEnv* env, jmethodID method))
--- a/hotspot/src/share/vm/prims/methodHandles.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -399,12 +399,12 @@
 // convert the external string or reflective type to an internal signature
 Symbol* MethodHandles::lookup_signature(oop type_str, bool intern_if_not_found, TRAPS) {
   if (java_lang_invoke_MethodType::is_instance(type_str)) {
-    return java_lang_invoke_MethodType::as_signature(type_str, intern_if_not_found, CHECK_NULL);
+    return java_lang_invoke_MethodType::as_signature(type_str, intern_if_not_found, THREAD);
   } else if (java_lang_Class::is_instance(type_str)) {
-    return java_lang_Class::as_signature(type_str, false, CHECK_NULL);
+    return java_lang_Class::as_signature(type_str, false, THREAD);
   } else if (java_lang_String::is_instance(type_str)) {
     if (intern_if_not_found) {
-      return java_lang_String::as_symbol(type_str, CHECK_NULL);
+      return java_lang_String::as_symbol(type_str, THREAD);
     } else {
       return java_lang_String::as_symbol_or_null(type_str);
     }
--- a/hotspot/src/share/vm/prims/unsafe.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/prims/unsafe.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -24,10 +24,6 @@
 
 #include "precompiled.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
 #include "memory/allocation.inline.hpp"
 #include "prims/jni.h"
 #include "prims/jvm.h"
@@ -43,6 +39,10 @@
 #include "trace/tracing.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/dtrace.hpp"
+#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif // INCLUDE_ALL_GCS
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -26,45 +26,38 @@
 
 #include <new>
 
+#include "classfile/classLoaderData.hpp"
+#include "classfile/stringTable.hpp"
 #include "code/codeCache.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
-
-#include "classfile/stringTable.hpp"
-#include "classfile/classLoaderData.hpp"
-
+#include "prims/wbtestmethods/parserTests.hpp"
 #include "prims/whitebox.hpp"
-#include "prims/wbtestmethods/parserTests.hpp"
-
-#include "runtime/thread.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/compilationPolicy.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
+#include "runtime/sweeper.hpp"
+#include "runtime/thread.hpp"
 #include "runtime/vm_version.hpp"
-#include "runtime/sweeper.hpp"
-
 #include "utilities/array.hpp"
 #include "utilities/debug.hpp"
+#include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
-#include "utilities/exceptions.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/heapRegionRemSet.hpp"
 #endif // INCLUDE_ALL_GCS
-
 #if INCLUDE_NMT
 #include "services/mallocSiteTable.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/nativeCallStack.hpp"
 #endif // INCLUDE_NMT
 
-#include "compiler/compileBroker.hpp"
-#include "runtime/compilationPolicy.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -2202,15 +2202,6 @@
     warning("DefaultMaxRAMFraction is deprecated and will likely be removed in a future release. "
         "Use MaxRAMFraction instead.");
   }
-  if (FLAG_IS_CMDLINE(UseCMSCompactAtFullCollection)) {
-    warning("UseCMSCompactAtFullCollection is deprecated and will likely be removed in a future release.");
-  }
-  if (FLAG_IS_CMDLINE(CMSFullGCsBeforeCompaction)) {
-    warning("CMSFullGCsBeforeCompaction is deprecated and will likely be removed in a future release.");
-  }
-  if (FLAG_IS_CMDLINE(UseCMSCollectionPassing)) {
-    warning("UseCMSCollectionPassing is deprecated and will likely be removed in a future release.");
-  }
 }
 
 // Check stack pages settings
--- a/hotspot/src/share/vm/runtime/atomic.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/atomic.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -25,7 +25,13 @@
 #include "precompiled.hpp"
 #include "runtime/atomic.inline.hpp"
 
-jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+/*
+ * This is the default implementation of byte-sized cmpxchg. It emulates jbyte-sized
+ * cmpxchg in terms of jint-sized cmpxchg. Platforms may override it by supplying
+ * their own inline definition and defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE, which
+ * causes the platform-specific implementation to be used instead.
+ */
+jbyte Atomic::cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
   assert(sizeof(jbyte) == 1, "assumption.");
   uintptr_t dest_addr = (uintptr_t)dest;
   uintptr_t offset = dest_addr % sizeof(jint);
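
For context, here is a compilable sketch of the strategy the comment describes: splice the byte into its containing aligned jint-sized word and retry a word-wide compare-and-swap until the byte is resolved. This is a sketch under stated assumptions, not the VM's exact code; GCC's __sync_val_compare_and_swap stands in for the jint-sized Atomic::cmpxchg:

    #include <cstdint>

    int8_t byte_cas_via_word(int8_t exchange_value, volatile int8_t* dest,
                             int8_t compare_value) {
      uintptr_t dest_addr = (uintptr_t)dest;
      uintptr_t offset    = dest_addr % sizeof(int32_t);  // byte index in the word
      volatile int32_t* dest_word = (volatile int32_t*)(dest_addr - offset);
      for (;;) {
        int32_t cur = *dest_word;                         // snapshot containing word
        int8_t* cur_bytes = (int8_t*)&cur;
        if (cur_bytes[offset] != compare_value) {
          return cur_bytes[offset];                       // byte CAS fails; report value seen
        }
        int32_t new_word = cur;
        ((int8_t*)&new_word)[offset] = exchange_value;    // splice in the new byte
        if (__sync_val_compare_and_swap(dest_word, cur, new_word) == cur) {
          return compare_value;                           // word CAS succeeded
        }
        // A neighboring byte changed between snapshot and CAS; retry.
      }
    }
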
--- a/hotspot/src/share/vm/runtime/atomic.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/atomic.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -28,6 +28,9 @@
 #include "memory/allocation.hpp"
 
 class Atomic : AllStatic {
+ private:
+  static jbyte cmpxchg_general(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value);
+
  public:
   // Atomic operations on jlong types are not available on all 32-bit
   // platforms. If atomic ops on jlongs are defined here they must only
@@ -104,7 +107,7 @@
   // *dest with exchange_value if the comparison succeeded. Returns prior
   // value of *dest. cmpxchg*() provide:
   // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
-  static jbyte           cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value);
+  inline static jbyte    cmpxchg    (jbyte        exchange_value, volatile jbyte*        dest, jbyte        compare_value);
   inline static jint     cmpxchg    (jint         exchange_value, volatile jint*         dest, jint         compare_value);
   // See comment above about using jlong atomics on 32-bit platforms
   inline static jlong    cmpxchg    (jlong        exchange_value, volatile jlong*        dest, jlong        compare_value);
--- a/hotspot/src/share/vm/runtime/atomic.inline.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/atomic.inline.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -87,4 +87,12 @@
   dec_ptr((volatile intptr_t*) dest);
 }
 
+#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+// See the comment in atomic.cpp for how to override.
+inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte *dest, jbyte comparand)
+{
+  return cmpxchg_general(exchange_value, dest, comparand);
+}
+#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE
+
 #endif // SHARE_VM_RUNTIME_ATOMIC_INLINE_HPP
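
The #ifndef above is the other half of the override protocol described in atomic.cpp. Schematically, a port with a native byte-wide CAS would, in its platform atomic header (e.g. an atomic_<os>_<cpu>.inline.hpp), do something like the following. This is a hypothetical sketch; the GCC builtin stands in for a hand-written implementation such as lock cmpxchgb on x86:

    // In the platform header, before atomic.inline.hpp reaches its #ifndef:
    #define VM_HAS_SPECIALIZED_CMPXCHG_BYTE

    inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
                                 jbyte compare_value) {
      return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
    }
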
--- a/hotspot/src/share/vm/runtime/fieldDescriptor.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/fieldDescriptor.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -94,7 +94,7 @@
 }
 
 oop fieldDescriptor::string_initial_value(TRAPS) const {
-  return constants()->uncached_string_at(initial_value_index(), CHECK_0);
+  return constants()->uncached_string_at(initial_value_index(), THREAD);
 }
 
 void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
--- a/hotspot/src/share/vm/runtime/globals.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/globals.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -29,10 +29,10 @@
 #include "runtime/globals.hpp"
 #include "runtime/globals_extension.hpp"
 #include "runtime/os.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/top.hpp"
-#include "trace/tracing.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1_globals.hpp"
 #endif // INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/runtime/globals.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -1523,15 +1523,9 @@
   develop(bool, UseAsyncConcMarkSweepGC, true,                              \
           "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
                                                                             \
-  develop(bool, RotateCMSCollectionTypes, false,                            \
-          "Rotate the CMS collections among concurrent and STW")            \
-                                                                            \
   product(bool, UseCMSBestFit, true,                                        \
           "Use CMS best fit allocation strategy")                           \
                                                                             \
-  product(bool, UseCMSCollectionPassing, true,                              \
-          "Use passing of collection from background to foreground")        \
-                                                                            \
   product(bool, UseParNewGC, false,                                         \
           "Use parallel threads in the new generation")                     \
                                                                             \
@@ -1707,16 +1701,6 @@
           "When CMS class unloading is enabled, the maximum CMS cycle "     \
           "count for which classes may not be unloaded")                    \
                                                                             \
-  product(bool, CMSCompactWhenClearAllSoftRefs, true,                       \
-          "Compact when asked to collect CMS gen with "                     \
-          "clear_all_soft_refs()")                                          \
-                                                                            \
-  product(bool, UseCMSCompactAtFullCollection, true,                        \
-          "Use Mark-Sweep-Compact algorithm at full collections")           \
-                                                                            \
-  product(uintx, CMSFullGCsBeforeCompaction, 0,                             \
-          "Number of CMS full collection done before compaction if > 0")    \
-                                                                            \
   develop(intx, CMSDictionaryChoice, 0,                                     \
           "Use BinaryTreeDictionary as default in the CMS generation")      \
                                                                             \
--- a/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/interfaceSupport.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -87,7 +87,7 @@
   // Short-circuit any possible re-entrant gc-a-lot attempt
   if (thread->skip_gcalot()) return;
 
-  if (is_init_completed()) {
+  if (Threads::is_vm_complete()) {
 
     if (++_fullgc_alot_invocation < FullGCALotStart) {
       return;
--- a/hotspot/src/share/vm/runtime/perfData.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/perfData.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -773,7 +773,7 @@
     static PerfStringVariable* create_string_variable(CounterNS ns,
                                                       const char* name,
                                                       const char *s, TRAPS) {
-      return create_string_variable(ns, name, 0, s, CHECK_NULL);
+      return create_string_variable(ns, name, 0, s, THREAD);
     };
 
     static PerfLongVariable* create_long_variable(CounterNS ns,
@@ -784,7 +784,7 @@
     static PerfLongVariable* create_long_variable(CounterNS ns,
                                                   const char* name,
                                                   PerfData::Units u, TRAPS) {
-      return create_long_variable(ns, name, u, (jlong)0, CHECK_NULL);
+      return create_long_variable(ns, name, u, (jlong)0, THREAD);
     };
 
     static PerfLongVariable* create_long_variable(CounterNS, const char* name,
@@ -805,7 +805,7 @@
 
     static PerfLongCounter* create_long_counter(CounterNS ns, const char* name,
                                                 PerfData::Units u, TRAPS) {
-      return create_long_counter(ns, name, u, (jlong)0, CHECK_NULL);
+      return create_long_counter(ns, name, u, (jlong)0, THREAD);
     };
 
     static PerfLongCounter* create_long_counter(CounterNS ns, const char* name,
@@ -823,49 +823,49 @@
 
     static PerfConstant* create_constant(CounterNS ns, const char* name,
                                          PerfData::Units u, jlong val, TRAPS) {
-      return create_long_constant(ns, name, u, val, CHECK_NULL);
+      return create_long_constant(ns, name, u, val, THREAD);
     }
 
     static PerfVariable* create_variable(CounterNS ns, const char* name,
                                          PerfData::Units u, jlong ival, TRAPS) {
-      return create_long_variable(ns, name, u, ival, CHECK_NULL);
+      return create_long_variable(ns, name, u, ival, THREAD);
     }
 
     static PerfVariable* create_variable(CounterNS ns, const char* name,
                                          PerfData::Units u, TRAPS) {
-      return create_long_variable(ns, name, u, (jlong)0, CHECK_NULL);
+      return create_long_variable(ns, name, u, (jlong)0, THREAD);
     }
 
     static PerfVariable* create_variable(CounterNS ns, const char* name,
                                          PerfData::Units u, jlong* sp, TRAPS) {
-      return create_long_variable(ns, name, u, sp, CHECK_NULL);
+      return create_long_variable(ns, name, u, sp, THREAD);
     }
 
     static PerfVariable* create_variable(CounterNS ns, const char* name,
                                          PerfData::Units u,
                                          PerfSampleHelper* sh, TRAPS) {
-      return create_long_variable(ns, name, u, sh, CHECK_NULL);
+      return create_long_variable(ns, name, u, sh, THREAD);
     }
 
     static PerfCounter* create_counter(CounterNS ns, const char* name,
                                        PerfData::Units u, jlong ival, TRAPS) {
-      return create_long_counter(ns, name, u, ival, CHECK_NULL);
+      return create_long_counter(ns, name, u, ival, THREAD);
     }
 
     static PerfCounter* create_counter(CounterNS ns, const char* name,
                                        PerfData::Units u, TRAPS) {
-      return create_long_counter(ns, name, u, (jlong)0, CHECK_NULL);
+      return create_long_counter(ns, name, u, (jlong)0, THREAD);
     }
 
     static PerfCounter* create_counter(CounterNS ns, const char* name,
                                        PerfData::Units u, jlong* sp, TRAPS) {
-      return create_long_counter(ns, name, u, sp, CHECK_NULL);
+      return create_long_counter(ns, name, u, sp, THREAD);
     }
 
     static PerfCounter* create_counter(CounterNS ns, const char* name,
                                        PerfData::Units u,
                                        PerfSampleHelper* sh, TRAPS) {
-      return create_long_counter(ns, name, u, sh, CHECK_NULL);
+      return create_long_counter(ns, name, u, sh, THREAD);
     }
 
     static void destroy();
--- a/hotspot/src/share/vm/runtime/reflection.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/reflection.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -635,7 +635,7 @@
 }
 
 objArrayHandle Reflection::get_exception_types(methodHandle method, TRAPS) {
-  return method->resolved_checked_exceptions(CHECK_(objArrayHandle()));
+  return method->resolved_checked_exceptions(THREAD);
 }
 
 
@@ -1003,7 +1003,7 @@
   } else {
     if (rtype == T_BOOLEAN || rtype == T_BYTE || rtype == T_CHAR || rtype == T_SHORT)
       narrow((jvalue*) result.get_value_addr(), rtype, CHECK_NULL);
-    return box((jvalue*) result.get_value_addr(), rtype, CHECK_NULL);
+    return box((jvalue*) result.get_value_addr(), rtype, THREAD);
   }
 }
 
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -986,7 +986,7 @@
   // last java frame on stack (which includes native call frames)
   vframeStream vfst(thread, true);  // Do not skip any javaCalls
 
-  return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
+  return find_callee_info_helper(thread, vfst, bc, callinfo, THREAD);
 }
 
 
--- a/hotspot/src/share/vm/runtime/stubRoutines.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/stubRoutines.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -62,6 +62,7 @@
 address StubRoutines::_atomic_store_ptr_entry                   = NULL;
 address StubRoutines::_atomic_cmpxchg_entry                     = NULL;
 address StubRoutines::_atomic_cmpxchg_ptr_entry                 = NULL;
+address StubRoutines::_atomic_cmpxchg_byte_entry                = NULL;
 address StubRoutines::_atomic_cmpxchg_long_entry                = NULL;
 address StubRoutines::_atomic_add_entry                         = NULL;
 address StubRoutines::_atomic_add_ptr_entry                     = NULL;
--- a/hotspot/src/share/vm/runtime/stubRoutines.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/stubRoutines.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -126,6 +126,7 @@
   static address _atomic_store_ptr_entry;
   static address _atomic_cmpxchg_entry;
   static address _atomic_cmpxchg_ptr_entry;
+  static address _atomic_cmpxchg_byte_entry;
   static address _atomic_cmpxchg_long_entry;
   static address _atomic_add_entry;
   static address _atomic_add_ptr_entry;
@@ -282,6 +283,7 @@
   static address atomic_store_ptr_entry()                  { return _atomic_store_ptr_entry; }
   static address atomic_cmpxchg_entry()                    { return _atomic_cmpxchg_entry; }
   static address atomic_cmpxchg_ptr_entry()                { return _atomic_cmpxchg_ptr_entry; }
+  static address atomic_cmpxchg_byte_entry()               { return _atomic_cmpxchg_byte_entry; }
   static address atomic_cmpxchg_long_entry()               { return _atomic_cmpxchg_long_entry; }
   static address atomic_add_entry()                        { return _atomic_add_entry; }
   static address atomic_add_ptr_entry()                    { return _atomic_add_ptr_entry; }
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -169,7 +169,7 @@
   void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); }
   // complete_exit gives up lock completely, returning recursion count
   // reenter reclaims lock with original recursion count
-  intptr_t complete_exit(TRAPS)  { return ObjectSynchronizer::complete_exit(_obj, CHECK_0); }
+  intptr_t complete_exit(TRAPS)  { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
   void reenter(intptr_t recursion, TRAPS)  { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
 };
 
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -473,7 +473,6 @@
                                                                                                                                      \
   unchecked_nonstatic_field(ageTable,          sizes,                                         sizeof(ageTable::sizes))               \
                                                                                                                                      \
-  nonstatic_field(BarrierSet,                  _max_covered_regions,                          int)                                   \
   nonstatic_field(BarrierSet,                  _kind,                                         BarrierSet::Name)                      \
   nonstatic_field(BlockOffsetTable,            _bottom,                                       HeapWord*)                             \
   nonstatic_field(BlockOffsetTable,            _end,                                          HeapWord*)                             \
--- a/hotspot/src/share/vm/services/memTracker.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/services/memTracker.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -64,7 +64,7 @@
     const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
   static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
   static inline Tracker get_virtual_memory_uncommit_tracker() { return Tracker(); }
-  static inline Tracker get_virtual_memory_release_tracker() { }
+  static inline Tracker get_virtual_memory_release_tracker() { return Tracker(); }
   static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
   static inline void record_thread_stack(void* addr, size_t size) { }
   static inline void release_thread_stack(void* addr, size_t size) { }
--- a/hotspot/src/share/vm/utilities/array.hpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/utilities/array.hpp	Mon Nov 24 23:28:48 2014 +0100
@@ -322,7 +322,7 @@
   void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
     size_t word_size = Array::size(length);
     return (void*) Metaspace::allocate(loader_data, word_size, read_only,
-                                       MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
+                                       MetaspaceObj::array_type(sizeof(T)), THREAD);
   }
 
   static size_t byte_sizeof(int length) { return sizeof(Array<T>) + MAX2(length - 1, 0) * sizeof(T); }
--- a/hotspot/src/share/vm/utilities/ostream.cpp	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/src/share/vm/utilities/ostream.cpp	Mon Nov 24 23:28:48 2014 +0100
@@ -30,6 +30,7 @@
 #include "runtime/os.hpp"
 #include "runtime/vm_version.hpp"
 #include "utilities/defaultStream.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/top.hpp"
 #include "utilities/xmlstream.hpp"
--- a/hotspot/test/compiler/regalloc/C1ObjectSpillInLogicOp.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/compiler/regalloc/C1ObjectSpillInLogicOp.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test
  * @bug 8027751
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @summary C1 crashes generating G1 post-barrier in Unsafe.getAndSetObject() intrinsic because of the new value spill
  * @run main/othervm -XX:+UseG1GC C1ObjectSpillInLogicOp
  *
--- a/hotspot/test/gc/6581734/Test6581734.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/6581734/Test6581734.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test Test6581734.java
  * @bug 6581734
+ * @requires vm.gc=="ConcMarkSweep" | vm.gc=="null"
  * @summary CMS Old Gen's collection usage is zero after GC which is incorrect
  * @run main/othervm -Xmx512m -verbose:gc -XX:+UseConcMarkSweepGC Test6581734
  *
--- a/hotspot/test/gc/TestSystemGC.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/TestSystemGC.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test TestSystemGC
  * @key gc
+ * @requires vm.gc=="null"
  * @summary Runs System.gc() with different flags.
  * @run main/othervm TestSystemGC
  * @run main/othervm -XX:+UseSerialGC TestSystemGC
--- a/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/arguments/TestAlignmentToUseLargePages.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
  * @bug 8024396
  * @key gc
  * @key regression
+ * @requires vm.gc=="null"
  * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
  * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
  * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
--- a/hotspot/test/gc/arguments/TestG1HeapRegionSize.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/arguments/TestG1HeapRegionSize.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,12 @@
  * @test TestG1HeapRegionSize
  * @key gc
  * @bug 8021879
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @summary Verify that the flag G1HeapRegionSize is updated properly
  * @run main/othervm -Xmx64m TestG1HeapRegionSize 1048576
- * @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m TestG1HeapRegionSize 2097152
- * @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m TestG1HeapRegionSize 2097152
- * @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m TestG1HeapRegionSize 33554432
+ * @run main/othervm -XX:G1HeapRegionSize=2m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
+ * @run main/othervm -XX:G1HeapRegionSize=3m -Xmx64m -XX:+UseG1GC TestG1HeapRegionSize 2097152
+ * @run main/othervm -XX:G1HeapRegionSize=64m -Xmx256m -XX:+UseG1GC TestG1HeapRegionSize 33554432
  */
 
 import sun.management.ManagementFactoryHelper;
@@ -41,14 +42,8 @@
   public static void main(String[] args) {
     HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
 
-    VMOption option = diagnostic.getVMOption("UseG1GC");
-    if (option.getValue().equals("false")) {
-      System.out.println("Skipping this test. It is only a G1 test.");
-      return;
-    }
-
     String expectedValue = getExpectedValue(args);
-    option = diagnostic.getVMOption("G1HeapRegionSize");
+    VMOption option = diagnostic.getVMOption("G1HeapRegionSize");
     if (!expectedValue.equals(option.getValue())) {
       throw new RuntimeException("Wrong value for G1HeapRegionSize. Expected " + expectedValue + " but got " + option.getValue());
     }
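
The hunk above shows the pattern behind the many @requires additions in these test changes: instead of a hand-rolled runtime guard that quietly turned the test into a no-op under the wrong collector, the jtreg descriptor declares its GC requirement up front, e.g.

     * @requires vm.gc=="G1" | vm.gc=="null"

so the harness skips the unsupported configuration rather than reporting a vacuous pass (vm.gc=="null" covers runs where no collector was forced on the command line).
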
--- a/hotspot/test/gc/concurrentMarkSweep/CheckAllocateAndSystemGC.java	Fri Nov 21 08:00:31 2014 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- * @test CheckAllocateAndSystemGC
- * @summary CMS: assert(used() == used_after_gc && used_after_gc <= capacity()) failed: used: 0 used_after_gc: 292080 capacity: 1431699456
- * @bug 8013032
- * @key gc
- * @key regression
- * @library /testlibrary
- * @run main/othervm CheckAllocateAndSystemGC
- * @author jon.masamitsu@oracle.com
- */
-
-import com.oracle.java.testlibrary.*;
-
-public class CheckAllocateAndSystemGC {
-  public static void main(String args[]) throws Exception {
-
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-      "-showversion",
-      "-XX:+UseConcMarkSweepGC",
-      "-Xmn4m",
-      "-XX:MaxTenuringThreshold=1",
-      "-XX:-UseCMSCompactAtFullCollection",
-      "CheckAllocateAndSystemGC$AllocateAndSystemGC"
-      );
-
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
-    output.shouldNotContain("error");
-
-    output.shouldHaveExitValue(0);
-  }
-  static class AllocateAndSystemGC {
-    public static void main(String [] args) {
-      Integer x[] = new Integer [1000];
-      // Allocate enough objects to cause a minor collection.
-      // These allocations suffice for a 4m young geneneration.
-      for (int i = 0; i < 100; i++) {
-        Integer y[] = new Integer[10000];
-      }
-      System.gc();
-    }
-  }
-}
--- a/hotspot/test/gc/concurrentMarkSweep/DisableResizePLAB.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/concurrentMarkSweep/DisableResizePLAB.java	Mon Nov 24 23:28:48 2014 +0100
@@ -26,6 +26,7 @@
  * @key gc
  * @bug 8060467
  * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
+ * @requires vm.gc=="ConcMarkSweep" | vm.gc=="null"
  * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
  * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
  */
--- a/hotspot/test/gc/concurrentMarkSweep/SystemGCOnForegroundCollector.java	Fri Nov 21 08:00:31 2014 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/**
- * @test SystemGCOnForegroundCollector
- * @summary CMS: Call reset_after_compaction() only if a compaction has been done
- * @bug 8013184
- * @key gc
- * @key regression
- * @library /testlibrary
- * @run main/othervm SystemGCOnForegroundCollector
- * @author jon.masamitsu@oracle.com
- */
-
-import com.oracle.java.testlibrary.*;
-
-public class SystemGCOnForegroundCollector {
-  public static void main(String args[]) throws Exception {
-
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-      "-showversion",
-      "-XX:+UseConcMarkSweepGC",
-      "-XX:MaxTenuringThreshold=1",
-      "-XX:-UseCMSCompactAtFullCollection",
-      ThreePlusMSSystemGC.class.getName()
-      );
-
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
-    output.shouldNotContain("error");
-
-    output.shouldHaveExitValue(0);
-  }
-
-  static class ThreePlusMSSystemGC {
-    public static void main(String [] args) {
-      // From running this test 3 System.gc() were always
-      // enough to see the failure but the cause of the failure
-      // depends on how objects are allocated in the CMS generation
-      // which is non-deterministic.  Use 30 iterations for a more
-      // reliable test.
-      for (int i = 0; i < 30; i++) {
-        System.gc();
-      }
-    }
-  }
-}
--- a/hotspot/test/gc/defnew/HeapChangeLogging.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/defnew/HeapChangeLogging.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,8 +28,6 @@
  * @build HeapChangeLogging
  * @summary Allocate to get a promotion failure and verify that heap change logging is present.
  * @run main HeapChangeLogging
- *
- * Test the output of G1SummarizeRSetStats in conjunction with G1SummarizeRSetStatsPeriod.
  */
 
 import java.util.regex.Matcher;
@@ -78,4 +76,4 @@
     payload = new byte[payloadSize];
     this.previous = previous;
   }
-}
\ No newline at end of file
+}
--- a/hotspot/test/gc/g1/TestHumongousShrinkHeap.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestHumongousShrinkHeap.java	Mon Nov 24 23:28:48 2014 +0100
@@ -24,6 +24,7 @@
 /**
  * @test TestHumongousShrinkHeap
  * @bug 8036025 8056043
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @summary Verify that heap shrinks after GC in the presence of fragmentation
  * due to humongous objects
  * @library /testlibrary
--- a/hotspot/test/gc/g1/TestRegionAlignment.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestRegionAlignment.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test TestRegionAlignment.java
  * @bug 8013791
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @summary Make sure that G1 ergonomics pick a heap size that is aligned with the region size
  * @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -XX:MaxRAM=555m TestRegionAlignment
  *
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java	Mon Nov 24 23:28:48 2014 +0100
@@ -70,9 +70,7 @@
         printTestInfo(maxCacheSize);
 
         vmOpts.add("-XX:G1ConcRSLogCacheSize=" + RSetCacheSize);
-
-        vmOpts.addAll(Arrays.asList(Utils.getFilteredTestJavaOpts(
-                ShrinkAuxiliaryDataTest.prohibitedVmOptions)));
+        vmOpts.addAll(Arrays.asList(Utils.getTestJavaOpts()));
 
         // for 32-bit VMs ObjectAlignmentInBytes is not an option
         if (Platform.is32bit()) {
@@ -273,14 +271,5 @@
         private static final int NUM_OBJECTS_PER_REGION = 10;
         private static final int NUM_LINKS = 20; // how many links to create for each object
 
-        private static final String[] prohibitedVmOptions = {
-            // remove this when @requires option will be on duty
-            "-XX:\\+UseParallelGC",
-            "-XX:\\+UseSerialGC",
-            "-XX:\\+UseConcMarkSweepGC",
-            "-XX:\\+UseParallelOldGC",
-            "-XX:\\+UseParNewGC",
-            "-Xconcgc"
-        };
     }
 }
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData05.java	Mon Nov 24 23:28:48 2014 +0100
@@ -26,6 +26,7 @@
  * @bug 8038423
  * @summary Checks that decommitment occurs for JVM with different
  * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @library /testlibrary /testlibrary/whitebox
  * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData05
  * @run driver/timeout=720 TestShrinkAuxiliaryData05
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData10.java	Mon Nov 24 23:28:48 2014 +0100
@@ -26,6 +26,7 @@
  * @bug 8038423
  * @summary Checks that decommitment occurs for JVM with different
  * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @library /testlibrary /testlibrary/whitebox
  * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData10
  * @run driver/timeout=720 TestShrinkAuxiliaryData10
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData15.java	Mon Nov 24 23:28:48 2014 +0100
@@ -26,6 +26,7 @@
  * @bug 8038423
  * @summary Checks that decommitment occurs for JVM with different
  * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @library /testlibrary /testlibrary/whitebox
  * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData15
  * @run driver/timeout=720 TestShrinkAuxiliaryData15
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData20.java	Mon Nov 24 23:28:48 2014 +0100
@@ -26,6 +26,7 @@
  * @bug 8038423
  * @summary Checks that decommitment occurs for JVM with different
  * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @library /testlibrary /testlibrary/whitebox
  * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData20
  * @run driver/timeout=720 TestShrinkAuxiliaryData20
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData25.java	Mon Nov 24 23:28:48 2014 +0100
@@ -26,6 +26,7 @@
  * @bug 8038423
  * @summary Checks that decommitment occurs for JVM with different
  * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @library /testlibrary /testlibrary/whitebox
  * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData25
  * @run driver/timeout=720 TestShrinkAuxiliaryData25
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData30.java	Mon Nov 24 23:28:48 2014 +0100
@@ -26,6 +26,7 @@
  * @bug 8038423
  * @summary Checks that decommitment occurs for JVM with different
  * G1ConcRSLogCacheSize and ObjectAlignmentInBytes options values
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @library /testlibrary /testlibrary/whitebox
  * @build TestShrinkAuxiliaryData TestShrinkAuxiliaryData30
  * @run driver/timeout=720 TestShrinkAuxiliaryData30
--- a/hotspot/test/gc/g1/TestShrinkToOneRegion.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/g1/TestShrinkToOneRegion.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test TestShrinkToOneRegion.java
  * @bug 8013872
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @summary Shrinking the heap down to one region used to hit an assert
  * @run main/othervm -XX:+UseG1GC -XX:G1HeapRegionSize=32m -Xmx256m TestShrinkToOneRegion
  *
--- a/hotspot/test/gc/metaspace/G1AddMetaspaceDependency.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/metaspace/G1AddMetaspaceDependency.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 /*
  * @test G1AddMetaspaceDependency
  * @bug 8010196
+ * @requires vm.gc=="G1" | vm.gc=="null"
  * @summary Checks that we don't get locking problems when adding metaspace dependencies with the G1 update buffer monitor
  * @run main/othervm -XX:+UseG1GC -XX:G1UpdateBufferSize=1 G1AddMetaspaceDependency
  */
--- a/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/metaspace/TestMetaspacePerfCounters.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 
 /* @test TestMetaspacePerfCounters
  * @bug 8014659
+ * @requires vm.gc=="null"
  * @library /testlibrary
  * @summary Tests that performance counters for metaspace and compressed class
  *          space exists and works.
--- a/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/metaspace/TestPerfCountersAndMemoryPools.java	Mon Nov 24 23:28:48 2014 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 /* @test TestPerfCountersAndMemoryPools
  * @bug 8023476
  * @library /testlibrary
+ * @requires vm.gc=="Serial" | vm.gc=="null"
  * @summary Tests that a MemoryPoolMXBeans and PerfCounters for metaspace
  *          report the same data.
  * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData -Xint TestPerfCountersAndMemoryPools
--- a/hotspot/test/gc/parallelScavenge/TestDynShrinkHeap.java	Fri Nov 21 08:00:31 2014 -0800
+++ b/hotspot/test/gc/parallelScavenge/TestDynShrinkHeap.java	Mon Nov 24 23:28:48 2014 +0100
@@ -25,6 +25,7 @@
  * @ignore 8019361
  * @test TestDynShrinkHeap
  * @bug 8016479
+ * @requires vm.gc=="Parallel" | vm.gc=="null"
  * @summary Verify that the heap shrinks after full GC according to the current values of the Min/MaxHeapFreeRatio flags
  * @library /testlibrary
  * @run main/othervm -XX:+UseAdaptiveSizePolicyWithSystemGC -XX:+UseParallelGC -XX:MinHeapFreeRatio=0 -XX:MaxHeapFreeRatio=100 -Xmx1g -verbose:gc TestDynShrinkHeap
--- a/hotspot/test/gc/startup_warnings/TestCMSForegroundFlags.java	Fri Nov 21 08:00:31 2014 -0800
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*/
-
-/*
-* @test TestCMSForegroundFlags
-* @key gc
-* @bug 8027132
-* @summary Test that the deprecated CMS foreground collector flags print warning messages
-* @library /testlibrary
-* @run main TestCMSForegroundFlags -XX:-UseCMSCompactAtFullCollection UseCMSCompactAtFullCollection
-* @run main TestCMSForegroundFlags -XX:CMSFullGCsBeforeCompaction=4 CMSFullGCsBeforeCompaction
-* @run main TestCMSForegroundFlags -XX:-UseCMSCollectionPassing UseCMSCollectionPassing
-*/
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestCMSForegroundFlags {
-  public static void main(String[] args) throws Exception {
-    if (args.length != 2) {
-      throw new Exception("Expected two arguments,flagValue and flagName");
-    }
-    String flagValue = args[0];
-    String flagName = args[1];
-
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flagValue, "-version");
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("warning: " + flagName + " is deprecated and will likely be removed in a future release.");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-}