Merge
author amurillo
Thu, 13 Nov 2014 16:11:00 -0800
changeset 27646 74c92bdda9b5
parent 27557 82f4cb44b2d7 (current diff)
parent 27645 8fe155d119af (diff)
child 27647 4dcb647196fd
child 27666 0b48f6967bbc
child 27685 26a697375de3
child 27695 c6e0ac3339ac
Merge
hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/EdenSpace.java
hotspot/test/compiler/7068051/Test7068051.sh
hotspot/test/gc/startup_warnings/TestCMSIncrementalMode.java
hotspot/test/gc/startup_warnings/TestCMSNoIncrementalMode.java
hotspot/test/gc/startup_warnings/TestIncGC.java
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/DefNewGeneration.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/DefNewGeneration.java	Thu Nov 13 16:11:00 2014 -0800
@@ -64,8 +64,8 @@
   }
 
   // Accessing spaces
-  public EdenSpace eden() {
-    return (EdenSpace) VMObjectFactory.newObject(EdenSpace.class, edenSpaceField.getValue(addr));
+  public ContiguousSpace eden() {
+    return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, edenSpaceField.getValue(addr));
   }
 
   public ContiguousSpace from() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/EdenSpace.java	Wed Jul 05 20:07:55 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** <P> Class EdenSpace describes eden-space in new
-    generation. (Currently it does not add any significant
-    functionality beyond ContiguousSpace.) */
-
-public class EdenSpace extends ContiguousSpace {
-  public EdenSpace(Address addr) {
-    super(addr);
-  }
-}
--- a/hotspot/make/windows/makefiles/sa.make	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/make/windows/makefiles/sa.make	Thu Nov 13 16:11:00 2014 -0800
@@ -122,7 +122,7 @@
 SA_LFLAGS = $(SA_LFLAGS) -map -debug
 !endif
 !if "$(BUILDARCH)" == "i486"
-SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS)
+SA_LFLAGS = /SAFESEH $(SA_LFLAGS)
 !endif
 
 SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -3513,7 +3513,7 @@
                  Rtags            = R3_ARG1,
                  Rindex           = R5_ARG3;
 
-  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
 
   // --------------------------------------------------------------------------
   // Check if fast case is possible.
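
For context on what allow_shared_alloc gates: when the heap supports inline contiguous allocation, generated code may allocate straight out of the shared eden with a CAS-based bump of the top pointer, falling back to the runtime slow path when the CAS loses or eden is exhausted. A minimal, self-contained sketch of that fast path (illustrative names only, not the VM's actual heap API):

    #include <atomic>
    #include <cstddef>

    // Sketch of CAS bump-pointer allocation in a shared eden. 'top' and
    // 'end' stand in for the eden top/end words the heap publishes.
    char* inline_contig_allocate(std::atomic<char*>& top, char* end,
                                 std::size_t size) {
      char* old_top = top.load(std::memory_order_relaxed);
      for (;;) {
        char* new_top = old_top + size;
        if (new_top > end) {
          return nullptr;                  // eden full: take the slow path
        }
        // On failure compare_exchange_weak reloads old_top, so just retry.
        if (top.compare_exchange_weak(old_top, new_top)) {
          return old_top;                  // allocation succeeded
        }
      }
    }

With incremental CMS removed, the extra !CMSIncrementalMode test is dropped and the fast path is enabled purely by the heap's capability; the hunks in the other templateTable files below repeat the same change.
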
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -2734,12 +2734,12 @@
 // box->dhw disposition - post-conditions at DONE_LABEL.
 // -   Successful inflated lock:  box->dhw != 0.
 //     Any non-zero value suffices.
-//     Consider G2_thread, rsp, boxReg, or unused_mark()
+//     Consider G2_thread, rsp, boxReg, or markOopDesc::unused_mark()
 // -   Successful Stack-lock: box->dhw == mark.
 //     box->dhw must contain the displaced mark word value
 // -   Failure -- icc.ZFlag == 0 and box->dhw is undefined.
 //     The slow-path fast_enter() and slow_enter() operators
-//     are responsible for setting box->dhw = NonZero (typically ::unused_mark).
+//     are responsible for setting box->dhw = NonZero (typically markOopDesc::unused_mark()).
 // -   Biased: box->dhw is undefined
 //
 // SPARC refworkload performance - specifically jetstream and scimark - are
@@ -2855,7 +2855,7 @@
          // If m->owner != null goto IsLocked
          // Pessimistic form: Test-and-CAS vs CAS
          // The optimistic form avoids RTS->RTO cache line upgrades.
-         ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+         ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
          andcc(Rscratch, Rscratch, G0);
          brx(Assembler::notZero, false, Assembler::pn, done);
          delayed()->nop();
@@ -2864,7 +2864,7 @@
 
       // Try to CAS m->owner from null to Self
       // Invariant: if we acquire the lock then _recursions should be 0.
-      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
       mov(G2_thread, Rscratch);
       cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
@@ -2948,7 +2948,7 @@
          // Test-and-CAS vs CAS
          // Pessimistic form avoids futile (doomed) CAS attempts
          // The optimistic form avoids RTS->RTO cache line upgrades.
-         ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
+         ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
          andcc(Rscratch, Rscratch, G0);
          brx(Assembler::notZero, false, Assembler::pn, done);
          delayed()->nop();
@@ -2957,13 +2957,13 @@
 
       // Try to CAS m->owner from null to Self
       // Invariant: if we acquire the lock then _recursions should be 0.
-      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
       mov(G2_thread, Rscratch);
       cas_ptr(Rmark, G0, Rscratch);
       cmp(Rscratch, G0);
       // ST box->displaced_header = NonZero.
       // Any non-zero value suffices:
-      //    unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
+      //    markOopDesc::unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
       st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
       // Intentional fall-through into done
    }
@@ -3031,30 +3031,30 @@
    // Note that we use 1-0 locking by default for the inflated case.  We
   // close the resultant (and rare) race by having contended threads in
    // monitorenter periodically poll _owner.
-   ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
-   ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
+   ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rscratch);
+   ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), Rbox);
    xor3(Rscratch, G2_thread, Rscratch);
    orcc(Rbox, Rscratch, Rbox);
    brx(Assembler::notZero, false, Assembler::pn, done);
    delayed()->
-   ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
-   ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
+   ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList), Rscratch);
+   ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq), Rbox);
    orcc(Rbox, Rscratch, G0);
    if (EmitSync & 65536) {
       Label LSucc ;
       brx(Assembler::notZero, false, Assembler::pn, LSucc);
       delayed()->nop();
       ba(done);
-      delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+      delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
 
       bind(LSucc);
-      st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+      st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
       if (os::is_MP()) { membar (StoreLoad); }
-      ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
+      ld_ptr(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ), Rscratch);
       andcc(Rscratch, Rscratch, G0);
       brx(Assembler::notZero, false, Assembler::pt, done);
       delayed()->andcc(G0, G0, G0);
-      add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
+      add(Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), Rmark);
       mov(G2_thread, Rscratch);
       cas_ptr(Rmark, G0, Rscratch);
       // invert icc.zf and goto done
@@ -3066,7 +3066,7 @@
       brx(Assembler::notZero, false, Assembler::pn, done);
       delayed()->nop();
       ba(done);
-      delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
+      delayed()->st_ptr(G0, Rmark, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner));
    }
 
    bind   (LStacked);
@@ -3196,7 +3196,7 @@
   assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
 
-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
     ba(slow_case);
     delayed()->nop();
@@ -3331,7 +3331,7 @@
   assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
   Label do_refill, discard_tlab;
 
-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
     ba(slow_case);
     delayed()->nop();
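
The repeated edit in this file (and in macroAssembler_x86.cpp below) replaces the hard-coded "- 2" with a named macro. Based on the removed comments, an ObjectMonitor reference held in a register still carries the low-order tag markOopDesc::monitor_value (2), so biasing each field displacement by that tag cancels it without a separate masking instruction. The macro presumably expands to something like the following (an assumption; the real definition lives with ObjectMonitor):

    // Presumed shape of the new macro: a field offset pre-biased by the
    // monitor tag, so a tagged ObjectMonitor* can be used directly:
    //   [tagged_ptr + offset - tag] == [untagged_ptr + offset]
    #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
      ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)

Keeping the tag in the register and folding it into the displacement avoids an address-generation stall, per the x86 comment this change deletes.
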
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -3309,7 +3309,7 @@
   // (creates a new TLAB, etc.)
 
   const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();
 
   if(UseTLAB) {
     Register RoldTopValue = RallocatedObject;
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1450,8 +1450,7 @@
 void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
                                             Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
   Label SpinLoop, SpinExit, doneRetry;
-  // Clean monitor_value bit to get valid pointer
-  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
 
   testl(retry_count_Reg, retry_count_Reg);
   jccb(Assembler::zero, doneRetry);
@@ -1532,7 +1531,7 @@
 // Use RTM for inflating locks
 // inputs: objReg (object to lock)
 //         boxReg (on-stack box address (displaced header location) - KILLED)
-//         tmpReg (ObjectMonitor address + 2(monitor_value))
+//         tmpReg (ObjectMonitor address + markOopDesc::monitor_value)
 void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
                                           Register scrReg, Register retry_on_busy_count_Reg,
                                           Register retry_on_abort_count_Reg,
@@ -1543,8 +1542,7 @@
   assert(tmpReg == rax, "");
   assert(scrReg == rdx, "");
   Label L_rtm_retry, L_decrement_retry, L_on_abort;
-  // Clean monitor_value bit to get valid pointer
-  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+  int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
 
   // Without cast to int32_t a movptr will destroy r10 which is typically obj
   movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
@@ -1716,7 +1714,7 @@
     atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
   }
   if (EmitSync & 1) {
-      // set box->dhw = unused_mark (3)
+      // set box->dhw = markOopDesc::unused_mark()
       // Force all sync thru slow-path: slow_enter() and slow_exit()
       movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
       cmpptr (rsp, (int32_t)NULL_WORD);
@@ -1769,7 +1767,7 @@
     // at [FETCH], below, will never observe a biased encoding (*101b).
     // If this invariant is not held we risk exclusion (safety) failure.
     if (UseBiasedLocking && !UseOptoBiasInlining) {
-      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
     }
 
 #if INCLUDE_RTM_OPT
@@ -1811,7 +1809,7 @@
     jmp(DONE_LABEL);
 
     bind(IsInflated);
-    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + 2(monitor_value)
+    // The object is inflated. tmpReg contains pointer to ObjectMonitor* + markOopDesc::monitor_value
 
 #if INCLUDE_RTM_OPT
     // Use the same RTM locking code in 32- and 64-bit VM.
@@ -1823,25 +1821,10 @@
 
 #ifndef _LP64
     // The object is inflated.
-    //
-    // TODO-FIXME: eliminate the ugly use of manifest constants:
-    //   Use markOopDesc::monitor_value instead of "2".
-    //   use markOop::unused_mark() instead of "3".
-    // The tmpReg value is an objectMonitor reference ORed with
-    // markOopDesc::monitor_value (2).   We can either convert tmpReg to an
-    // objectmonitor pointer by masking off the "2" bit or we can just
-    // use tmpReg as an objectmonitor pointer but bias the objectmonitor
-    // field offsets with "-2" to compensate for and annul the low-order tag bit.
-    //
-    // I use the latter as it avoids AGI stalls.
-    // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
-    // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
-    //
-    #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
 
     // boxReg refers to the on-stack BasicLock in the current frame.
     // We'd like to write:
-    //   set box->_displaced_header = markOop::unused_mark().  Any non-0 value suffices.
+    //   set box->_displaced_header = markOopDesc::unused_mark().  Any non-0 value suffices.
    // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
     // additional latency as we have another ST in the store buffer that must drain.
 
@@ -1853,7 +1836,7 @@
        if (os::is_MP()) {
          lock();
        }
-       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     } else
     if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
        movptr(scrReg, boxReg);
@@ -1862,7 +1845,7 @@
        // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
        if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
           // prefetchw [eax + Offset(_owner)-2]
-          prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+          prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
        }
 
        if ((EmitSync & 64) == 0) {
@@ -1871,7 +1854,7 @@
        } else {
          // Can suffer RTS->RTO upgrades on shared or cold $ lines
          // Test-And-CAS instead of CAS
-         movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+         movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
          testptr(tmpReg, tmpReg);                   // Locked ?
          jccb  (Assembler::notZero, DONE_LABEL);
        }
@@ -1887,11 +1870,11 @@
        if (os::is_MP()) {
          lock();
        }
-       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
        movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
        jccb  (Assembler::notZero, DONE_LABEL);
        get_thread (scrReg);                    // beware: clobbers ICCs
-       movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg);
+       movptr(Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), scrReg);
        xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success
 
        // If the CAS fails we can either retry or pass control to the slow-path.
@@ -1908,7 +1891,7 @@
        // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
        if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
           // prefetchw [eax + Offset(_owner)-2]
-          prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+          prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
        }
 
        if ((EmitSync & 64) == 0) {
@@ -1916,7 +1899,7 @@
          xorptr  (tmpReg, tmpReg);
        } else {
          // Can suffer RTS->RTO upgrades on shared or cold $ lines
-         movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+         movptr(tmpReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));   // rax, = m->_owner
          testptr(tmpReg, tmpReg);                   // Locked ?
          jccb  (Assembler::notZero, DONE_LABEL);
        }
@@ -1928,7 +1911,7 @@
        if (os::is_MP()) {
          lock();
        }
-       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       cmpxchgptr(scrReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
 
        // If the CAS fails we can either retry or pass control to the slow-path.
        // We use the latter tactic.
@@ -1951,7 +1934,7 @@
     movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
 
     movptr (boxReg, tmpReg);
-    movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    movptr(tmpReg, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     testptr(tmpReg, tmpReg);
     jccb   (Assembler::notZero, DONE_LABEL);
 
@@ -1959,7 +1942,7 @@
     if (os::is_MP()) {
       lock();
     }
-    cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    cmpxchgptr(r15_thread, Address(boxReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     // Intentional fall-through into DONE_LABEL ...
 #endif // _LP64
 
@@ -2065,8 +2048,7 @@
 #if INCLUDE_RTM_OPT
     if (use_rtm) {
       Label L_regular_inflated_unlock;
-      // Clean monitor_value bit to get valid pointer
-      int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
+      int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
       movptr(boxReg, Address(tmpReg, owner_offset));
       testptr(boxReg, boxReg);
       jccb(Assembler::notZero, L_regular_inflated_unlock);
@@ -2102,7 +2084,7 @@
     get_thread (boxReg);
     if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
       // prefetchw [ebx + Offset(_owner)-2]
-      prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      prefetchw(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     }
 
     // Note that we could employ various encoding schemes to reduce
@@ -2111,21 +2093,21 @@
     // In practice the chain of fetches doesn't seem to impact performance, however.
     if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
        // Attempt to reduce branch density - AMD's branch predictor.
-       xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-       orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
-       orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
-       orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+       xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
        jccb  (Assembler::notZero, DONE_LABEL);
-       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
        jmpb  (DONE_LABEL);
     } else {
-       xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-       orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+       xorptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
        jccb  (Assembler::notZero, DONE_LABEL);
-       movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
-       orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+       movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+       orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
        jccb  (Assembler::notZero, CheckSucc);
-       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
        jmpb  (DONE_LABEL);
     }
 
@@ -2143,7 +2125,7 @@
 
        // Optional pre-test ... it's safe to elide this
        if ((EmitSync & 16) == 0) {
-          cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+          cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
           jccb  (Assembler::zero, LGoSlowPath);
        }
 
@@ -2173,7 +2155,7 @@
        // We currently use (3), although it's likely that switching to (2)
        // is correct for the future.
 
-       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
        if (os::is_MP()) {
           if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
             mfence();
@@ -2182,18 +2164,18 @@
           }
        }
        // Ratify _succ remains non-null
-       cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0);
+       cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), 0);
        jccb  (Assembler::notZero, LSuccess);
 
        xorptr(boxReg, boxReg);                  // box is really EAX
        if (os::is_MP()) { lock(); }
-       cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       cmpxchgptr(rsp, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
        jccb  (Assembler::notEqual, LSuccess);
       // Since we're low on registers we installed rsp as a placeholder in _owner.
        // Now install Self over rsp.  This is safe as we're transitioning from
       // non-null to non-null
        get_thread (boxReg);
-       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg);
+       movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), boxReg);
        // Intentional fall-through into LGoSlowPath ...
 
        bind  (LGoSlowPath);
@@ -2228,36 +2210,36 @@
     }
 #else // _LP64
     // It's inflated
-    movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
     xorptr(boxReg, r15_thread);
-    orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+    orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
     jccb  (Assembler::notZero, DONE_LABEL);
-    movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
-    orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+    movptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
+    orptr(boxReg, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
     jccb  (Assembler::notZero, CheckSucc);
-    movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+    movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
     jmpb  (DONE_LABEL);
 
     if ((EmitSync & 65536) == 0) {
       Label LSuccess, LGoSlowPath ;
       bind  (CheckSucc);
-      cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
       jccb  (Assembler::zero, LGoSlowPath);
 
       // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
      // explicit ST;MEMBAR combination, but masm doesn't currently support
       // "ANDQ M,IMM".  Don't use MFENCE here.  lock:add to TOS, xchg, etc
       // are all faster when the write buffer is populated.
-      movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      movptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t)NULL_WORD);
       if (os::is_MP()) {
          lock (); addl (Address(rsp, 0), 0);
       }
-      cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      cmpptr(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), (int32_t)NULL_WORD);
       jccb  (Assembler::notZero, LSuccess);
 
       movptr (boxReg, (int32_t)NULL_WORD);                   // box is really EAX
       if (os::is_MP()) { lock(); }
-      cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      cmpxchgptr(r15_thread, Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
       jccb  (Assembler::notEqual, LSuccess);
       // Intentional fall-through into slow-path
 
@@ -2964,7 +2946,7 @@
                                    Label& slow_case) {
   assert(obj == rax, "obj must be in rax, for cmpxchg");
   assert_different_registers(obj, var_size_in_bytes, t1);
-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     jmp(slow_case);
   } else {
     Register end = t1;
@@ -4437,7 +4419,7 @@
   assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
   Label do_refill, discard_tlab;
 
-  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+  if (!Universe::heap()->supports_inline_contig_alloc()) {
     // No allocation in the shared eden.
     jmp(slow_case);
   }
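
The exit paths above implement the "1-0" unlock protocol the comments describe: drop _owner with a plain store, fence, then ratify that a successor (_succ) exists; if none does, try to CAS the lock back and fall into the slow path so a waiter gets woken. A hedged, self-contained sketch of that shape (illustrative types, not the VM's ObjectMonitor):

    #include <atomic>

    struct MonitorSketch {
      std::atomic<void*> owner;  // lock holder, null when free
      std::atomic<void*> succ;   // presumed heir, set by the slow path
    };

    // Returns true if the fast exit is complete; false means the caller
    // must take the slow path and wake a successor explicitly.
    bool fast_exit(MonitorSketch* m, void* self) {
      m->owner.store(nullptr, std::memory_order_release);   // 1-0 release
      std::atomic_thread_fence(std::memory_order_seq_cst);  // ST;MEMBAR
      if (m->succ.load() != nullptr) {
        return true;             // an heir exists; it will retry the lock
      }
      void* expected = nullptr;
      if (!m->owner.compare_exchange_strong(expected, self)) {
        return true;             // someone else already took the lock
      }
      return false;              // we re-own it: go wake a successor
    }
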
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -3214,7 +3214,7 @@
   // (creates a new TLAB, etc.)
 
   const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();
 
   const Register thread = rcx;
   if (UseTLAB || allow_shared_alloc) {
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -3269,7 +3269,7 @@
   // (creates a new TLAB, etc.)
 
   const bool allow_shared_alloc =
-    Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+    Universe::heap()->supports_inline_contig_alloc();
 
   if (UseTLAB) {
     __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -2608,7 +2608,10 @@
   assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
   assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
   if (UseLargePages) {
-    Solaris::setup_large_pages(addr, bytes, alignment_hint);
+    size_t page_size = Solaris::page_size_for_alignment(alignment_hint);
+    if (page_size > (size_t) vm_page_size()) {
+      Solaris::setup_large_pages(addr, bytes, page_size);
+    }
   }
 }
 
--- a/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -31,6 +31,11 @@
 
 // Implementation of class OrderAccess.
 
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
 inline void OrderAccess::loadload()   { acquire(); }
 inline void OrderAccess::storestore() { release(); }
 inline void OrderAccess::loadstore()  { acquire(); }
@@ -46,9 +51,7 @@
 }
 
 inline void OrderAccess::release() {
-  // Avoid hitting the same cache-line from
-  // different threads.
-  volatile jint local_dummy = 0;
+  compiler_barrier();
 }
 
 inline void OrderAccess::fence() {
@@ -62,34 +65,34 @@
   }
 }
 
-inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
-inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
-inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
-inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
-inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
-inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
-inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
-inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
+inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { jbyte   v = *p; compiler_barrier(); return v; }
+inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { jshort  v = *p; compiler_barrier(); return v; }
+inline jint     OrderAccess::load_acquire(volatile jint*    p) { jint    v = *p; compiler_barrier(); return v; }
+inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { jlong   v = Atomic::load(p); compiler_barrier(); return v; }
+inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { jubyte  v = *p; compiler_barrier(); return v; }
+inline jushort  OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; }
+inline juint    OrderAccess::load_acquire(volatile juint*   p) { juint   v = *p; compiler_barrier(); return v; }
+inline julong   OrderAccess::load_acquire(volatile julong*  p) { julong  v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; }
+inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { jfloat  v = *p; compiler_barrier(); return v; }
+inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; }
 
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
+inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { intptr_t v = *p; compiler_barrier(); return v; }
+inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { void*    v = *(void* volatile *)p; compiler_barrier(); return v; }
+inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { void*    v = *(void* const volatile *)p; compiler_barrier(); return v; }
 
-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
+inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { compiler_barrier(); Atomic::store(v, p); }
+inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); }
+inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { compiler_barrier(); *p = v; }
 inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }
 
-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
+inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; }
+inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { compiler_barrier(); *(void* volatile *)p = v; }
 
 inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
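
The rationale for this file's change: x86 is TSO, so ordinary loads already have acquire semantics and ordinary stores release semantics at the hardware level; all load_acquire/release_store must prevent is compiler reordering. The empty asm with a "memory" clobber does exactly that, and it replaces the old dummy volatile store in release(), which cost a real store without being a reliable compiler fence. A minimal sketch of the pattern (generic types, not the VM's):

    // Empty asm with a "memory" clobber: emits no instructions, but stops
    // the compiler from moving memory accesses across it.
    static inline void compiler_barrier() {
      __asm__ volatile ("" : : : "memory");
    }

    int load_acquire(const volatile int* p) {
      int v = *p;          // an x86 load is already an acquire
      compiler_barrier();  // keep later accesses from moving above the load
      return v;
    }

    void release_store(volatile int* p, int v) {
      compiler_barrier();  // keep earlier accesses from moving below the store
      *p = v;              // an x86 store is already a release
    }
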
--- a/hotspot/src/share/vm/Xusage.txt	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/Xusage.txt	Thu Nov 13 16:11:00 2014 -0800
@@ -7,7 +7,6 @@
     -Xbootclasspath/p:<directories and zip/jar files separated by ;>
                       prepend in front of bootstrap class path
     -Xnoclassgc       disable class garbage collection
-    -Xincgc           enable incremental garbage collection
     -Xloggc:<file>    log GC status to a file with time stamps
     -Xbatch           disable background compilation
     -Xms<size>        set initial Java heap size
--- a/hotspot/src/share/vm/ci/ciMethod.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -68,7 +68,10 @@
 // ciMethod::ciMethod
 //
 // Loaded method.
-ciMethod::ciMethod(methodHandle h_m) : ciMetadata(h_m()) {
+ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
+  ciMetadata(h_m()),
+  _holder(holder)
+{
   assert(h_m() != NULL, "no null method");
 
   // These fields are always filled in in loaded methods.
@@ -124,7 +127,6 @@
   // generating _signature may allow GC and therefore move m.
   // These fields are always filled in.
   _name = env->get_symbol(h_m()->name());
-  _holder = env->get_instance_klass(h_m()->method_holder());
   ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
   constantPoolHandle cpool = h_m()->constants();
   _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
--- a/hotspot/src/share/vm/ci/ciMethod.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -91,7 +91,7 @@
   BCEscapeAnalyzer*   _bcea;
 #endif
 
-  ciMethod(methodHandle h_m);
+  ciMethod(methodHandle h_m, ciInstanceKlass* holder);
   ciMethod(ciInstanceKlass* holder, ciSymbol* name, ciSymbol* signature, ciInstanceKlass* accessor);
 
   Method* get_Method() const {
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -239,7 +239,7 @@
 ciObject* ciObjectFactory::get(oop key) {
   ASSERT_IN_VM;
 
-  assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");
 
   NonPermObject* &bucket = find_non_perm(key);
   if (bucket != NULL) {
@@ -260,10 +260,10 @@
 }
 
 // ------------------------------------------------------------------
-// ciObjectFactory::get
+// ciObjectFactory::get_metadata
 //
-// Get the ciObject corresponding to some oop.  If the ciObject has
-// already been created, it is returned.  Otherwise, a new ciObject
+// Get the ciMetadata corresponding to some Metadata. If the ciMetadata has
+// already been created, it is returned. Otherwise, a new ciMetadata
 // is created.
 ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
   ASSERT_IN_VM;
@@ -290,9 +290,9 @@
   }
 #endif
   if (!is_found_at(index, key, _ci_metadata)) {
-    // The ciObject does not yet exist.  Create it and insert it
+    // The ciMetadata does not yet exist. Create it and insert it
     // into the cache.
-    ciMetadata* new_object = create_new_object(key);
+    ciMetadata* new_object = create_new_metadata(key);
     init_ident_of(new_object);
     assert(new_object->is_metadata(), "must be");
 
@@ -344,15 +344,28 @@
 }
 
 // ------------------------------------------------------------------
-// ciObjectFactory::create_new_object
+// ciObjectFactory::create_new_metadata
 //
-// Create a new ciObject from a Metadata*.
+// Create a new ciMetadata from a Metadata*.
 //
-// Implementation note: this functionality could be virtual behavior
-// of the oop itself.  For now, we explicitly marshal the object.
-ciMetadata* ciObjectFactory::create_new_object(Metadata* o) {
+// Implementation note: in order to keep Metadata live, an auxiliary ciObject
+// is used, which points to its holder.
+ciMetadata* ciObjectFactory::create_new_metadata(Metadata* o) {
   EXCEPTION_CONTEXT;
 
+  // Keep metadata from being unloaded by keeping its holder alive.
+  if (_initialized && o->is_klass()) {
+    Klass* holder = ((Klass*)o);
+    if (holder->oop_is_instance() && InstanceKlass::cast(holder)->is_anonymous()) {
+      // Though ciInstanceKlass records the class loader oop, it's not enough to keep
+      // VM anonymous classes alive (loader == NULL). Klass holder should be used instead.
+      // It is enough to record a ciObject, since cached elements are never removed
+      // during ciObjectFactory lifetime. ciObjectFactory itself is created for
+      // every compilation and lives for the whole duration of the compilation.
+      ciObject* h = get(holder->klass_holder());
+    }
+  }
+
   if (o->is_klass()) {
     KlassHandle h_k(THREAD, (Klass*)o);
     Klass* k = (Klass*)o;
@@ -365,14 +378,16 @@
     }
   } else if (o->is_method()) {
     methodHandle h_m(THREAD, (Method*)o);
-    return new (arena()) ciMethod(h_m);
+    ciEnv *env = CURRENT_THREAD_ENV;
+    ciInstanceKlass* holder = env->get_instance_klass(h_m()->method_holder());
+    return new (arena()) ciMethod(h_m, holder);
   } else if (o->is_methodData()) {
     // Hold methodHandle alive - might not be necessary ???
     methodHandle h_m(THREAD, ((MethodData*)o)->method());
     return new (arena()) ciMethodData((MethodData*)o);
   }
 
-  // The oop is of some type not supported by the compiler interface.
+  // The Metadata* is of some type not supported by the compiler interface.
   ShouldNotReachHere();
   return NULL;
 }
@@ -701,7 +716,7 @@
 // If there is no entry in the cache corresponding to this oop, return
 // the null tail of the bucket into which the oop should be inserted.
 ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
-  assert(Universe::heap()->is_in_reserved_or_null(key), "must be");
+  assert(Universe::heap()->is_in_reserved(key), "must be");
   ciMetadata* klass = get_metadata(key->klass());
   NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
   for (NonPermObject* p; (p = (*bp)) != NULL; bp = &p->next()) {
--- a/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -73,7 +73,7 @@
   void insert(int index, ciMetadata* obj, GrowableArray<ciMetadata*>* objects);
 
   ciObject* create_new_object(oop o);
-  ciMetadata* create_new_object(Metadata* o);
+  ciMetadata* create_new_metadata(Metadata* o);
 
   void ensure_metadata_alive(ciMetadata* m);
 
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -2059,7 +2059,7 @@
   u2** localvariable_table_start;
   u2* localvariable_type_table_length;
   u2** localvariable_type_table_start;
-  u2 method_parameters_length = 0;
+  int method_parameters_length = -1;
   u1* method_parameters_data = NULL;
   bool method_parameters_seen = false;
   bool parsed_code_attribute = false;
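
The type change here is load-bearing: method_parameters_length was a u2, so 0 had to double as "no MethodParameters attribute", making a present-but-empty attribute indistinguishable from an absent one. Widening it to int frees -1 to mean "absent" (the hunks below reset it to -1 when the attribute cannot be reflected), while 0 now means "present with zero entries". A hedged illustration of the sentinel convention (parser-style names, the rest illustrative):

    int method_parameters_length = -1;   // -1: attribute absent
    // on seeing the attribute:  method_parameters_length = cfs->get_u1_fast();  (0..255)
    bool attribute_present = (method_parameters_length >= 0);
    // Length 0 now means "present but empty", which reflection must report
    // differently from "no attribute at all" (-1).
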
@@ -2278,7 +2278,8 @@
       }
       method_parameters_seen = true;
       method_parameters_length = cfs->get_u1_fast();
-      if (method_attribute_length != (method_parameters_length * 4u) + 1u) {
+      const u2 real_length = (method_parameters_length * 4u) + 1u;
+      if (method_attribute_length != real_length) {
         classfile_parse_error(
           "Invalid MethodParameters method attribute length %u in class file",
           method_attribute_length, CHECK_(nullHandle));
@@ -2288,7 +2289,7 @@
       cfs->skip_u2_fast(method_parameters_length);
       // ignore this attribute if it cannot be reflected
       if (!SystemDictionary::Parameter_klass_loaded())
-        method_parameters_length = 0;
+        method_parameters_length = -1;
     } else if (method_attribute_name == vmSymbols::tag_synthetic()) {
       if (method_attribute_length != 0) {
         classfile_parse_error(
@@ -3491,17 +3492,18 @@
           real_offset = next_nonstatic_oop_offset;
           next_nonstatic_oop_offset += heapOopSize;
         }
-        // Update oop maps
+
+        // Record this oop in the oop maps
         if( nonstatic_oop_map_count > 0 &&
             nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
             real_offset -
             int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
             heapOopSize ) {
-          // Extend current oop map
+          // This oop is adjacent to the previous one, add to current oop map
           assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
           nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
         } else {
-          // Create new oop map
+          // This oop is not adjacent to the previous one, create new oop map
           assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
           nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
           nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
@@ -3623,13 +3625,24 @@
             real_offset = next_nonstatic_padded_offset;
             next_nonstatic_padded_offset += heapOopSize;
 
-            // Create new oop map
-            assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
-            nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
-            nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
-            nonstatic_oop_map_count += 1;
-            if( first_nonstatic_oop_offset == 0 ) { // Undefined
-              first_nonstatic_oop_offset = real_offset;
+            // Record this oop in the oop maps
+            if( nonstatic_oop_map_count > 0 &&
+                nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
+                real_offset -
+                int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
+                heapOopSize ) {
+              // This oop is adjacent to the previous one, add to current oop map
+              assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
+              nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
+            } else {
+              // This oop is not adjacent to the previous one, create new oop map
+              assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
+              nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
+              nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
+              nonstatic_oop_map_count += 1;
+              if( first_nonstatic_oop_offset == 0 ) { // Undefined
+                first_nonstatic_oop_offset = real_offset;
+              }
             }
             break;
 
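Both classFileParser hunks now share one oop-map recording pattern: oop maps are run-length encoded as (start offset, count) pairs, and an oop field that lands immediately after the last recorded run extends that run instead of opening a new map. Previously the padded (@Contended) branch always opened a new map. A self-contained sketch of the shared logic (plain arrays standing in for the parser's locals):

    #include <cassert>

    // Record one oop field at 'real_offset' into run-length-encoded oop
    // maps. 'oop_size' stands in for heapOopSize.
    void record_oop(int real_offset, int oop_size,
                    int* offsets, unsigned* counts,
                    unsigned& map_count, unsigned max_maps) {
      if (map_count > 0 &&
          offsets[map_count - 1] ==
              real_offset - (int)counts[map_count - 1] * oop_size) {
        counts[map_count - 1] += 1;        // adjacent: extend current run
      } else {
        assert(map_count < max_maps && "range check");
        offsets[map_count] = real_offset;  // not adjacent: open a new run
        counts[map_count] = 1;
        map_count += 1;
      }
    }

Fewer, denser oop maps mean fewer ranges for the GC to walk when scanning instances of the class.
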
--- a/hotspot/src/share/vm/classfile/classLoader.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1120,7 +1120,7 @@
     h = context.record_result(classpath_index, e, result, THREAD);
   } else {
     if (DumpSharedSpaces) {
-      tty->print_cr("Preload Error: Cannot find %s", class_name);
+      tty->print_cr("Preload Warning: Cannot find %s", class_name);
     }
   }
 
--- a/hotspot/src/share/vm/classfile/dictionary.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -223,7 +223,7 @@
         }
         free_entry(probe);
         ResourceMark rm;
-        tty->print_cr("Removed error class: %s", ik->external_name());
+        tty->print_cr("Preload Warning: Removed error class: %s", ik->external_name());
         continue;
       }
 
--- a/hotspot/src/share/vm/code/codeBlob.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -43,7 +43,7 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
 
-unsigned int align_code_offset(int offset) {
+unsigned int CodeBlob::align_code_offset(int offset) {
   // align the size to CodeEntryAlignment
   return
     ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
--- a/hotspot/src/share/vm/code/codeBlob.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/code/codeBlob.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -83,6 +83,7 @@
  public:
   // Returns the space needed for CodeBlob
   static unsigned int allocation_size(CodeBuffer* cb, int header_size);
+  static unsigned int align_code_offset(int offset);
 
   // Creation
   // a) simple CodeBlob
@@ -207,7 +208,7 @@
   }
 };
 
-
+class WhiteBox;
 //----------------------------------------------------------------------------------------------------
 // BufferBlob: used to hold non-relocatable machine code such as the interpreter, stubroutines, etc.
 
@@ -215,6 +216,7 @@
   friend class VMStructs;
   friend class AdapterBlob;
   friend class MethodHandlesAdapterBlob;
+  friend class WhiteBox;
 
  private:
   // Creation support
--- a/hotspot/src/share/vm/code/codeCache.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -305,7 +305,7 @@
   MemoryService::add_code_heap_memory_pool(heap, name);
 }
 
-CodeHeap* CodeCache::get_code_heap(CodeBlob* cb) {
+CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) {
   assert(cb != NULL, "CodeBlob is null");
   FOR_ALL_HEAPS(heap) {
     if ((*heap)->contains(cb)) {
--- a/hotspot/src/share/vm/code/codeCache.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/code/codeCache.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -77,6 +77,7 @@
 class CodeCache : AllStatic {
   friend class VMStructs;
   friend class NMethodIterator;
+  friend class WhiteBox;
  private:
   // CodeHeaps of the cache
   static GrowableArray<CodeHeap*>* _heaps;
@@ -98,7 +99,7 @@
   static void initialize_heaps();                             // Initializes the CodeHeaps
   // Creates a new heap with the given name and size, containing CodeBlobs of the given type
   static void add_heap(ReservedSpace rs, const char* name, size_t size_initial, int code_blob_type);
-  static CodeHeap* get_code_heap(CodeBlob* cb);               // Returns the CodeHeap for the given CodeBlob
+  static CodeHeap* get_code_heap(const CodeBlob* cb);         // Returns the CodeHeap for the given CodeBlob
   static CodeHeap* get_code_heap(int code_blob_type);         // Returns the CodeHeap for the given CodeBlobType
   // Returns the name of the VM option to set the size of the corresponding CodeHeap
   static const char* get_code_heap_flag_name(int code_blob_type);
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -35,6 +35,7 @@
 #include "oops/method.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/nativeLookup.hpp"
+#include "prims/whitebox.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/compilationPolicy.hpp"
@@ -1963,6 +1964,12 @@
     if (comp == NULL) {
       ci_env.record_method_not_compilable("no compiler", !TieredCompilation);
     } else {
+      if (WhiteBoxAPI && WhiteBox::compilation_locked) {
+        MonitorLockerEx locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
+        while (WhiteBox::compilation_locked) {
+          locker.wait(Mutex::_no_safepoint_check_flag);
+        }
+      }
       comp->compile_method(&ci_env, target, osr_bci);
     }
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -89,9 +89,3 @@
     _gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
   }
 }
-
-// Returns true if the incremental mode is enabled.
-bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
-{
-  return CMSIncrementalMode;
-}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -42,9 +42,6 @@
   virtual void initialize_size_policy(size_t init_eden_size,
                                       size_t init_promo_size,
                                       size_t init_survivor_size);
-
-  // Returns true if the incremental mode is enabled.
-  virtual bool has_soft_ended_eden();
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -2083,17 +2083,13 @@
 }
 
 // Support for compaction
-
 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
+  scan_and_forward(this, cp);
   // Prepare_for_compaction() uses the space between live objects
  // so that the later phase can skip dead space quickly.  So verification
  // of the free lists doesn't work afterwards.
 }
 
-#define obj_size(q) adjustObjectSize(oop(q)->size())
-#define adjust_obj_size(s) adjustObjectSize(s)
-
 void CompactibleFreeListSpace::adjust_pointers() {
   // In other versions of adjust_pointers(), a bail out
   // based on the amount of live data in the generation
@@ -2101,12 +2097,12 @@
   // Cannot test used() == 0 here because the free lists have already
   // been mangled by the compaction.
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
   // See note about verification in prepare_for_compaction().
 }
 
 void CompactibleFreeListSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
@@ -2629,7 +2625,7 @@
   // Get the #blocks we want to claim
   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
   assert(n_blks > 0, "Error");
-  assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
   // In some cases, when the application has a phase change,
   // there may be a sudden and sharp shift in the object survival
   // profile, and updating the counts at the end of a scavenge
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -73,6 +73,13 @@
   friend class CMSCollector;
   // Local alloc buffer for promotion into this space.
   friend class CFLS_LAB;
+  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
 
   // "Size" of chunks of work (executed during parallel remark phases
   // of CMS collection); this probably belongs in CMSCollector, although
@@ -288,6 +295,28 @@
     _bt.freed(start, size);
   }
 
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return end();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
+  }
+
+  inline size_t adjust_obj_size(size_t size) const {
+    return adjustObjectSize(size);
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return adjustObjectSize(oop(addr)->size());
+  }
+
  protected:
   // Reset the indexed free list to its initial empty condition.
   void resetIndexedFreeListArray();
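
These hooks replace the SCAN_AND_FORWARD/SCAN_AND_ADJUST_POINTERS/SCAN_AND_COMPACT macros removed from compactibleFreeListSpace.cpp above: the shared walking logic moves into CompactibleSpace function templates, and each space supplies scan_limit(), scanned_block_is_obj(), scanned_block_size(), adjust_obj_size() and obj_size() as non-virtual calls resolved at compile time. A hedged skeleton of how such a template presumably walks the space (an assumption about its shape, inferred from the hooks declared here):

    // Sketch only: the real template lives in the shared space code.
    template <typename SpaceType>
    void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
      HeapWord* q = space->bottom();
      HeapWord* t = space->scan_limit();           // end() for this space
      while (q < t) {
        if (space->scanned_block_is_obj(q)) {
          // Live or parsable block: fix its interior pointers and advance
          // by the (space-adjusted) object size.
          size_t size = space->adjust_obj_size(oop(q)->adjust_pointers());
          q += size;
        } else {
          // Dead or free chunk: skip it using the space's block size.
          q += space->scanned_block_size(q);
        }
      }
    }

Because SpaceType is known statically, calls like scanned_block_size() bind directly to the CompactibleFreeListSpace overrides, which is the "Avoid virtual call" noted in the hook comments.
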
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -167,16 +167,6 @@
 };
 
 
-// Wrapper class to temporarily disable icms during a foreground cms collection.
-class ICMSDisabler: public StackObj {
- public:
-  // The ctor disables icms and wakes up the thread so it notices the change;
-  // the dtor re-enables icms.  Note that the CMSCollector methods will check
-  // CMSIncrementalMode.
-  ICMSDisabler()  { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
-  ~ICMSDisabler() { CMSCollector::enable_icms(); }
-};
-
 //////////////////////////////////////////////////////////////////
 //  Concurrent Mark-Sweep Generation /////////////////////////////
 //////////////////////////////////////////////////////////////////
@@ -363,7 +353,6 @@
   _cms_used_at_gc0_end = 0;
   _allow_duty_cycle_reduction = false;
   _valid_bits = 0;
-  _icms_duty_cycle = CMSIncrementalDutyCycle;
 }
 
 double CMSStats::cms_free_adjustment_factor(size_t free) const {
@@ -442,86 +431,17 @@
   return work - deadline;
 }
 
-// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
-// amount of change to prevent wild oscillation.
-unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
-                                              unsigned int new_duty_cycle) {
-  assert(old_duty_cycle <= 100, "bad input value");
-  assert(new_duty_cycle <= 100, "bad input value");
-
-  // Note:  use subtraction with caution since it may underflow (values are
-  // unsigned).  Addition is safe since we're in the range 0-100.
-  unsigned int damped_duty_cycle = new_duty_cycle;
-  if (new_duty_cycle < old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
-    if (new_duty_cycle + largest_delta < old_duty_cycle) {
-      damped_duty_cycle = old_duty_cycle - largest_delta;
-    }
-  } else if (new_duty_cycle > old_duty_cycle) {
-    const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
-    if (new_duty_cycle > old_duty_cycle + largest_delta) {
-      damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
-    }
-  }
-  assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
-
-  if (CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
-                           old_duty_cycle, new_duty_cycle, damped_duty_cycle);
-  }
-  return damped_duty_cycle;
-}
-
-unsigned int CMSStats::icms_update_duty_cycle_impl() {
-  assert(CMSIncrementalPacing && valid(),
-         "should be handled in icms_update_duty_cycle()");
-
-  double cms_time_so_far = cms_timer().seconds();
-  double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
-  double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
-
-  // Avoid division by 0.
-  double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
-  double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
-
-  unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
-  if (new_duty_cycle > _icms_duty_cycle) {
-    // Avoid very small duty cycles (1 or 2); 0 is allowed.
-    if (new_duty_cycle > 2) {
-      _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
-                                                new_duty_cycle);
-    }
-  } else if (_allow_duty_cycle_reduction) {
-    // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
-    new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
-    // Respect the minimum duty cycle.
-    unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
-    _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
-  }
-
-  if (PrintGCDetails || CMSTraceIncrementalPacing) {
-    gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
-  }
-
-  _allow_duty_cycle_reduction = false;
-  return _icms_duty_cycle;
-}
-
 #ifndef PRODUCT
 void CMSStats::print_on(outputStream *st) const {
   st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
   st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
                gc0_duration(), gc0_period(), gc0_promoted());
-  st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
-            cms_duration(), cms_duration_per_mb(),
-            cms_period(), cms_allocated());
+  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
+            cms_duration(), cms_period(), cms_allocated());
   st->print(",cms_since_beg=%g,cms_since_end=%g",
             cms_time_since_begin(), cms_time_since_end());
   st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
             _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
-  if (CMSIncrementalMode) {
-    st->print(",dc=%d", icms_duty_cycle());
-  }
 
   if (valid()) {
     st->print(",promo_rate=%g,cms_alloc_rate=%g",
@@ -579,8 +499,6 @@
 #endif
   _collection_count_start(0),
   _verifying(false),
-  _icms_start_limit(NULL),
-  _icms_stop_limit(NULL),
   _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
   _completed_initialization(false),
   _collector_policy(cp),
@@ -1116,137 +1034,6 @@
   }
 }
 
-static inline size_t percent_of_space(Space* space, HeapWord* addr)
-{
-  size_t delta = pointer_delta(addr, space->bottom());
-  return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
-}
-
-void CMSCollector::icms_update_allocation_limits()
-{
-  Generation* young = GenCollectedHeap::heap()->get_gen(0);
-  EdenSpace* eden = young->as_DefNewGeneration()->eden();
-
-  const unsigned int duty_cycle = stats().icms_update_duty_cycle();
-  if (CMSTraceIncrementalPacing) {
-    stats().print();
-  }
-
-  assert(duty_cycle <= 100, "invalid duty cycle");
-  if (duty_cycle != 0) {
-    // The duty_cycle is a percentage between 0 and 100; convert to words and
-    // then compute the offset from the endpoints of the space.
-    size_t free_words = eden->free() / HeapWordSize;
-    double free_words_dbl = (double)free_words;
-    size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
-    size_t offset_words = (free_words - duty_cycle_words) / 2;
-
-    _icms_start_limit = eden->top() + offset_words;
-    _icms_stop_limit = eden->end() - offset_words;
-
-    // The limits may be adjusted (shifted to the right) by
-    // CMSIncrementalOffset, to allow the application more mutator time after a
-    // young gen gc (when all mutators were stopped) and before CMS starts and
-    // takes away one or more cpus.
-    if (CMSIncrementalOffset != 0) {
-      double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
-      size_t adjustment = (size_t)adjustment_dbl;
-      HeapWord* tmp_stop = _icms_stop_limit + adjustment;
-      if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
-        _icms_start_limit += adjustment;
-        _icms_stop_limit = tmp_stop;
-      }
-    }
-  }
-  if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
-    _icms_start_limit = _icms_stop_limit = eden->end();
-  }
-
-  // Install the new start limit.
-  eden->set_soft_end(_icms_start_limit);
-
-  if (CMSTraceIncrementalMode) {
-    gclog_or_tty->print(" icms alloc limits:  "
-                           PTR_FORMAT "," PTR_FORMAT
-                           " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
-                           p2i(_icms_start_limit), p2i(_icms_stop_limit),
-                           percent_of_space(eden, _icms_start_limit),
-                           percent_of_space(eden, _icms_stop_limit));
-    if (Verbose) {
-      gclog_or_tty->print("eden:  ");
-      eden->print_on(gclog_or_tty);
-    }
-  }
-}
-
-// Any changes here should try to maintain the invariant
-// that if this method is called with _icms_start_limit
-// and _icms_stop_limit both NULL, then it should return NULL
-// and not notify the icms thread.
-HeapWord*
-CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
-                                       size_t word_size)
-{
-  // A start_limit equal to end() means the duty cycle is 0, so treat that as a
-  // nop.
-  if (CMSIncrementalMode && _icms_start_limit != space->end()) {
-    if (top <= _icms_start_limit) {
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               p2i(top), p2i(_icms_stop_limit),
-                               percent_of_space(space, _icms_stop_limit));
-      }
-      ConcurrentMarkSweepThread::start_icms();
-      assert(top < _icms_stop_limit, "Tautology");
-      if (word_size < pointer_delta(_icms_stop_limit, top)) {
-        return _icms_stop_limit;
-      }
-
-      // The allocation will cross both the _start and _stop limits, so do the
-      // stop notification also and return end().
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               p2i(top), p2i(space->end()),
-                               percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (top <= _icms_stop_limit) {
-      if (CMSTraceIncrementalMode) {
-        space->print_on(gclog_or_tty);
-        gclog_or_tty->stamp();
-        gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
-                               ", new limit=" PTR_FORMAT
-                               " (" SIZE_FORMAT "%%)",
-                               top, space->end(),
-                               percent_of_space(space, space->end()));
-      }
-      ConcurrentMarkSweepThread::stop_icms();
-      return space->end();
-    }
-
-    if (CMSTraceIncrementalMode) {
-      space->print_on(gclog_or_tty);
-      gclog_or_tty->stamp();
-      gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
-                             ", new limit=" PTR_FORMAT,
-                             top, NULL);
-    }
-  }
-
-  return NULL;
-}
-
 oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   // allocate, copy and if necessary update promoinfo --
@@ -1289,14 +1076,6 @@
 }
 
 
-HeapWord*
-ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
-                                             HeapWord* top,
-                                             size_t word_sz)
-{
-  return collector()->allocation_limit_reached(space, top, word_sz);
-}
-
 // IMPORTANT: Notes on object size recognition in CMS.
 // ---------------------------------------------------
 // A block of storage in the CMS generation is always in
@@ -1809,9 +1588,6 @@
   // we want to do a foreground collection.
   _foregroundGCIsActive = true;
 
-  // Disable incremental mode during a foreground collection.
-  ICMSDisabler icms_disabler;
-
   // release locks and wait for a notify from the background collector
   // releasing the locks is only necessary for phases which
   // perform yields to improve the granularity of the collection.
@@ -2135,7 +1911,7 @@
 
 void CMSCollector::print_eden_and_survivor_chunk_arrays() {
   DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
-  EdenSpace* eden_space = dng->eden();
+  ContiguousSpace* eden_space = dng->eden();
   ContiguousSpace* from_space = dng->from();
   ContiguousSpace* to_space   = dng->to();
   // Eden
@@ -2783,10 +2559,6 @@
   //
   _cmsGen->update_counters(cms_used);
 
-  if (CMSIncrementalMode) {
-    icms_update_allocation_limits();
-  }
-
   bitMapLock()->unlock();
   releaseFreelistLocks();
 
@@ -4272,12 +4044,10 @@
   assert_lock_strong(_bit_map_lock);
   _bit_map_lock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // It is possible for whichever thread initiated the yield request
   // not to get a chance to wake up and take the bitmap lock between
@@ -4307,7 +4077,6 @@
                    ConcurrentMarkSweepThread::should_yield() &&
                    !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -5238,7 +5007,7 @@
 
 void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
   DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
-  EdenSpace* eden_space = dng->eden();
+  ContiguousSpace* eden_space = dng->eden();
   ContiguousSpace* from_space = dng->from();
   ContiguousSpace* to_space   = dng->to();
 
@@ -5410,7 +5179,7 @@
     while (!pst->is_task_claimed(/* reference */ nth_task)) {
       // We claimed task # nth_task; compute its boundaries.
       if (chunk_top == 0) {  // no samples were taken
-        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
         start = space->bottom();
         end   = space->top();
       } else if (nth_task == 0) {
@@ -5788,7 +5557,7 @@
   // process_roots (which currently doesn't know how to
   // parallelize such a scan), but rather will be broken up into
   // a set of parallel tasks (via the sampling that the [abortable]
-  // preclean phase did of EdenSpace, plus the [two] tasks of
+  // preclean phase did of eden, plus the [two] tasks of
   // scanning the [two] survivor spaces. Further fine-grain
   // parallelization of the scanning of the survivor spaces
   // themselves, and of precleaning of the younger gen itself
@@ -6474,19 +6243,16 @@
         assert_lock_strong(bitMapLock());
         bitMapLock()->unlock();
         ConcurrentMarkSweepThread::desynchronize(true);
-        ConcurrentMarkSweepThread::acknowledge_yield_request();
         stopTimer();
         if (PrintCMSStatistics != 0) {
           incrementYields();
         }
-        icms_wait();
 
         // See the comment in coordinator_yield()
         for (unsigned i = 0; i < CMSYieldSleepCount &&
                          ConcurrentMarkSweepThread::should_yield() &&
                          !CMSCollector::foregroundGCIsActive(); ++i) {
           os::sleep(Thread::current(), 1, false);
-          ConcurrentMarkSweepThread::acknowledge_yield_request();
         }
 
         ConcurrentMarkSweepThread::synchronize(true);
@@ -6509,10 +6275,6 @@
     _collectorState = Idling;
   }
 
-  // Stop incremental mode after a cycle completes, so that any future cycles
-  // are triggered by allocation.
-  stop_icms();
-
   NOT_PRODUCT(
     if (RotateCMSCollectionTypes) {
       _cmsGen->rotate_debug_collection_type();
@@ -6964,12 +6726,10 @@
   _bit_map->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0;
@@ -6978,7 +6738,6 @@
        !CMSCollector::foregroundGCIsActive();
        ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7124,19 +6883,16 @@
   _bitMap->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                    ConcurrentMarkSweepThread::should_yield() &&
                    !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7196,19 +6952,16 @@
   // Relinquish the bit map lock
   _bit_map->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7354,19 +7107,16 @@
   assert_lock_strong(_bitMap->lock());
   _bitMap->lock()->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -7388,7 +7138,7 @@
   _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
-  // the marking phase (especially if running iCMS). During
+  // the marking phase. During
   // this time it's possible that a lot of mutations have
   // accumulated in the card table and the mod union table --
   // these mutation records are redundant until we have
@@ -7505,7 +7255,7 @@
   _finger = ptr + obj->size();
   assert(_finger > ptr, "we just incremented it above");
   // On large heaps, it may take us some time to get through
-  // the marking phase (especially if running iCMS). During
+  // the marking phase. During
   // this time it's possible that a lot of mutations have
   // accumulated in the card table and the mod union table --
   // these mutation records are redundant until we have
@@ -7994,20 +7744,16 @@
   bml->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
 
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
-
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
@@ -8675,19 +8421,16 @@
   _bitMap->lock()->unlock();
   _freelistLock->unlock();
   ConcurrentMarkSweepThread::desynchronize(true);
-  ConcurrentMarkSweepThread::acknowledge_yield_request();
   _collector->stopTimer();
   if (PrintCMSStatistics != 0) {
     _collector->incrementYields();
   }
-  _collector->icms_wait();
 
   // See the comment in coordinator_yield()
   for (unsigned i = 0; i < CMSYieldSleepCount &&
                        ConcurrentMarkSweepThread::should_yield() &&
                        !CMSCollector::foregroundGCIsActive(); ++i) {
     os::sleep(Thread::current(), 1, false);
-    ConcurrentMarkSweepThread::acknowledge_yield_request();
   }
 
   ConcurrentMarkSweepThread::synchronize(true);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -356,7 +356,6 @@
   size_t _gc0_promoted;         // bytes promoted per gc0
   double _cms_duration;
   double _cms_duration_pre_sweep; // time from initiation to start of sweep
-  double _cms_duration_per_mb;
   double _cms_period;
   size_t _cms_allocated;        // bytes of direct allocation per gc0 period
 
@@ -383,17 +382,7 @@
 
   unsigned int _valid_bits;
 
-  unsigned int _icms_duty_cycle;        // icms duty cycle (0-100).
-
  protected:
-
-  // Return a duty cycle that avoids wild oscillations, by limiting the amount
-  // of change between old_duty_cycle and new_duty_cycle (the latter is treated
-  // as a recommended value).
-  static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
-                                             unsigned int new_duty_cycle);
-  unsigned int icms_update_duty_cycle_impl();
-
   // In support of adjusting of cms trigger ratios based on history
   // of concurrent mode failure.
   double cms_free_adjustment_factor(size_t free) const;
@@ -426,7 +415,6 @@
   size_t gc0_promoted() const   { return _gc0_promoted; }
   double cms_period() const          { return _cms_period; }
   double cms_duration() const        { return _cms_duration; }
-  double cms_duration_per_mb() const { return _cms_duration_per_mb; }
   size_t cms_allocated() const       { return _cms_allocated; }
 
   size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
@@ -458,12 +446,6 @@
 
   // End of higher level statistics.
 
-  // Returns the cms incremental mode duty cycle, as a percentage (0-100).
-  unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
-
-  // Update the duty cycle and return the new value.
-  unsigned int icms_update_duty_cycle();
-
   // Debugging.
   void print_on(outputStream* st) const PRODUCT_RETURN;
   void print() const { print_on(gclog_or_tty); }
@@ -725,13 +707,6 @@
   // Timing, allocation and promotion statistics, used for scheduling.
   CMSStats      _stats;
 
-  // Allocation limits installed in the young gen, used only in
-  // CMSIncrementalMode.  When an allocation in the young gen would cross one of
-  // these limits, the cms generation is notified and the cms thread is started
-  // or stopped, respectively.
-  HeapWord*     _icms_start_limit;
-  HeapWord*     _icms_stop_limit;
-
   enum CMS_op_type {
     CMS_op_checkpointRootsInitial,
     CMS_op_checkpointRootsFinal
@@ -867,10 +842,6 @@
   // collector.
   bool waitForForegroundGC();
 
-  // Incremental mode triggering:  recompute the icms duty cycle and set the
-  // allocation limits in the young gen.
-  void icms_update_allocation_limits();
-
   size_t block_size_using_printezis_bits(HeapWord* addr) const;
   size_t block_size_if_printezis_bits(HeapWord* addr) const;
   HeapWord* next_card_start_after_block(HeapWord* addr) const;
@@ -928,9 +899,6 @@
   void promoted(bool par, HeapWord* start,
                 bool is_obj_array, size_t obj_size);
 
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                     size_t word_size);
-
   void getFreelistLocks() const;
   void releaseFreelistLocks() const;
   bool haveFreelistLocks() const;
@@ -1001,14 +969,6 @@
   // Timers/stats for gc scheduling and incremental mode pacing.
   CMSStats& stats() { return _stats; }
 
-  // Convenience methods that check whether CMSIncrementalMode is enabled and
-  // forward to the corresponding methods in ConcurrentMarkSweepThread.
-  static void start_icms();
-  static void stop_icms();    // Called at the end of the cms cycle.
-  static void disable_icms(); // Called before a foreground collection.
-  static void enable_icms();  // Called after a foreground collection.
-  void icms_wait();          // Called at yield points.
-
   // Adaptive size policy
   AdaptiveSizePolicy* size_policy();
 
@@ -1211,9 +1171,6 @@
     return allocate(size, tlab);
   }
 
-  // Incremental mode triggering.
-  HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                     size_t word_size);
 
   // Used by CMSStats to track direct allocation.  The value is sampled and
   // reset after each young gen collection.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -234,36 +234,6 @@
   }
 }
 
-inline void CMSCollector::start_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::start_icms();
-  }
-}
-
-inline void CMSCollector::stop_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::stop_icms();
-  }
-}
-
-inline void CMSCollector::disable_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::disable_icms();
-  }
-}
-
-inline void CMSCollector::enable_icms() {
-  if (CMSIncrementalMode) {
-    ConcurrentMarkSweepThread::enable_icms();
-  }
-}
-
-inline void CMSCollector::icms_wait() {
-  if (CMSIncrementalMode) {
-    cmsThread()->icms_wait();
-  }
-}
-
 inline void CMSCollector::save_sweep_limits() {
   _cmsGen->save_sweep_limit();
 }
@@ -363,12 +333,6 @@
   _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
     cur_duration, _cms_alpha);
 
-  // Avoid division by 0.
-  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
-  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
-                                 cur_duration / cms_used_mb,
-                                 _cms_alpha);
-
   _cms_end_time.update();
   _cms_alpha = _saved_alpha;
   _allow_duty_cycle_reduction = true;
@@ -400,15 +364,6 @@
   return (gc0_promoted() + cms_allocated()) / gc0_period();
 }
 
-inline unsigned int CMSStats::icms_update_duty_cycle() {
-  // Update the duty cycle only if pacing is enabled and the stats are valid
-  // (after at least one young gen gc and one cms cycle have completed).
-  if (CMSIncrementalPacing && valid()) {
-    return icms_update_duty_cycle_impl();
-  }
-  return _icms_duty_cycle;
-}
-
 inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
   cmsSpace()->save_sweep_limit();
 }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -49,13 +49,6 @@
 int  ConcurrentMarkSweepThread::_CMS_flag         = CMS_nil;
 
 volatile jint ConcurrentMarkSweepThread::_pending_yields      = 0;
-volatile jint ConcurrentMarkSweepThread::_pending_decrements  = 0;
-
-volatile jint ConcurrentMarkSweepThread::_icms_disabled   = 0;
-volatile bool ConcurrentMarkSweepThread::_should_run     = false;
-// When icms is enabled, the icms thread is stopped until explicitly
-// started.
-volatile bool ConcurrentMarkSweepThread::_should_stop    = true;
 
 SurrogateLockerThread*
      ConcurrentMarkSweepThread::_slt = NULL;
@@ -99,7 +92,6 @@
     }
   }
   _sltMonitor = SLT_lock;
-  assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
 }
 
 void ConcurrentMarkSweepThread::run() {
@@ -184,11 +176,6 @@
 }
 
 void ConcurrentMarkSweepThread::stop() {
-  if (CMSIncrementalMode) {
-    // Disable incremental mode and wake up the thread so it notices the change.
-    disable_icms();
-    start_icms();
-  }
   // it is ok to take late safepoints here, if needed
   {
     MutexLockerEx x(Terminator_lock);
@@ -387,23 +374,13 @@
 
 void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
   while (!_should_terminate) {
-    if (CMSIncrementalMode) {
-      icms_wait();
-      if(CMSWaitDuration >= 0) {
-        // Wait until the next synchronous GC, a concurrent full gc
-        // request or a timeout, whichever is earlier.
-        wait_on_cms_lock_for_scavenge(CMSWaitDuration);
-      }
-      return;
+    if (CMSWaitDuration >= 0) {
+      // Wait until the next synchronous GC, a concurrent full gc
+      // request or a timeout, whichever is earlier.
+      wait_on_cms_lock_for_scavenge(CMSWaitDuration);
     } else {
-      if(CMSWaitDuration >= 0) {
-        // Wait until the next synchronous GC, a concurrent full gc
-        // request or a timeout, whichever is earlier.
-        wait_on_cms_lock_for_scavenge(CMSWaitDuration);
-      } else {
-        // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
-        wait_on_cms_lock(CMSCheckInterval);
-      }
+      // Wait for any cms_lock event, timing out after CMSCheckInterval so that shouldConcurrentCollect is not polled continuously
+      wait_on_cms_lock(CMSCheckInterval);
     }
     // Check if we should start a CMS collection cycle
     if (_collector->shouldConcurrentCollect()) {
@@ -414,42 +391,6 @@
   }
 }
 
-// Incremental CMS
-void ConcurrentMarkSweepThread::start_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  trace_state("start_icms");
-  _should_run = true;
-  iCMS_lock->notify_all();
-}
-
-void ConcurrentMarkSweepThread::stop_icms() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-  if (!_should_stop) {
-    trace_state("stop_icms");
-    _should_stop = true;
-    _should_run = false;
-    asynchronous_yield_request();
-    iCMS_lock->notify_all();
-  }
-}
-
-void ConcurrentMarkSweepThread::icms_wait() {
-  assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
-  if (_should_stop && icms_is_enabled()) {
-    MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
-    trace_state("pause_icms");
-    _collector->stats().stop_cms_timer();
-    while(!_should_run && icms_is_enabled()) {
-      iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
-    }
-    _collector->stats().start_cms_timer();
-    _should_stop = false;
-    trace_state("pause_icms end");
-  }
-}
-
 // Note: this method, although exported by the ConcurrentMarkSweepThread,
 // which is a non-JavaThread, can only be called by a JavaThread.
 // Currently this is done at vm creation time (post-vm-init) by the
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -64,20 +64,11 @@
   static bool clear_CMS_flag(int b)         { return (_CMS_flag &= ~b) != 0; }
   void sleepBeforeNextCycle();
 
-  // CMS thread should yield for a young gen collection, direct allocation,
-  // and iCMS activity.
+  // CMS thread should yield for young gen collections and direct allocations.
   static char _pad_1[64 - sizeof(jint)];    // prevent cache-line sharing
   static volatile jint _pending_yields;
-  static volatile jint _pending_decrements; // decrements to _pending_yields
   static char _pad_2[64 - sizeof(jint)];    // prevent cache-line sharing
 
-  // Tracing messages, enabled by CMSTraceThreadState.
-  static inline void trace_state(const char* desc);
-
-  static volatile int _icms_disabled;   // a counter to track #iCMS disable & enable
-  static volatile bool _should_run;     // iCMS may run
-  static volatile bool _should_stop;    // iCMS should stop
-
   // debugging
   void verify_ok_to_terminate() const PRODUCT_RETURN;
 
@@ -135,44 +126,13 @@
   void wait_on_cms_lock_for_scavenge(long t_millis);
 
   // The CMS thread will yield during the work portion of its cycle
-  // only when requested to.  Both synchronous and asychronous requests
-  // are provided:
-  // (1) A synchronous request is used for young gen collections and
-  //     for direct allocations.  The requesting thread increments
-  //     _pending_yields at the beginning of an operation, and decrements
-  //     _pending_yields when that operation is completed.
-  //     In turn, the CMS thread yields when _pending_yields is positive,
-  //     and continues to yield until the value reverts to 0.
-  // (2) An asynchronous request, on the other hand, is used by iCMS
-  //     for the stop_icms() operation. A single yield satisfies all of
-  //     the outstanding asynch yield requests, of which there may
-  //     occasionally be several in close succession. To accomplish
-  //     this, an asynch-requesting thread atomically increments both
-  //     _pending_yields and _pending_decrements. An asynchr requesting
-  //     thread does not wait and "acknowledge" completion of an operation
-  //     and deregister the request, like the synchronous version described
-  //     above does. In turn, after yielding, the CMS thread decrements both
-  //     _pending_yields and _pending_decrements by the value seen in
-  //     _pending_decrements before the decrement.
-  //  NOTE: The above scheme is isomorphic to having two request counters,
-  //  one for async requests and one for sync requests, and for the CMS thread
-  //  to check the sum of the two counters to decide whether it should yield
-  //  and to clear only the async counter when it yields. However, it turns out
-  //  to be more efficient for CMS code to just check a single counter
-  //  _pending_yields that holds the sum (of both sync and async requests), and
-  //  a second counter _pending_decrements that only holds the async requests,
-  //  for greater efficiency, since in a typical CMS run, there are many more
-  //  potential (i.e. static) yield points than there are actual
-  //  (i.e. dynamic) yields because of requests, which are few and far between.
-  //
-  // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
-  // we cannot easily test that invariant, since the counters are manipulated via
-  // atomic instructions without explicit locking and we cannot read
-  // the two counters atomically together: one suggestion is to
-  // use (for example) 16-bit counters so as to be able to read the
-  // two counters atomically even on 32-bit platforms. Notice that
-  // the second assert in acknowledge_yield_request() below does indeed
-  // check a form of the above invariant, albeit indirectly.
+  // only when requested to.
+  // A synchronous request is used for young gen collections and
+  // for direct allocations.  The requesting thread increments
+  // _pending_yields at the beginning of an operation, and decrements
+  // _pending_yields when that operation is completed.
+  // In turn, the CMS thread yields when _pending_yields is positive,
+  // and continues to yield until the value reverts to 0.
 
   static void increment_pending_yields()   {
     Atomic::inc(&_pending_yields);
@@ -182,67 +142,9 @@
     Atomic::dec(&_pending_yields);
     assert(_pending_yields >= 0, "can't be negative");
   }
-  static void asynchronous_yield_request() {
-    assert(CMSIncrementalMode, "Currently only used w/iCMS");
-    increment_pending_yields();
-    Atomic::inc(&_pending_decrements);
-    assert(_pending_decrements >= 0, "can't be negative");
-  }
-  static void acknowledge_yield_request() {
-    jint decrement = _pending_decrements;
-    if (decrement > 0) {
-      assert(CMSIncrementalMode, "Currently only used w/iCMS");
-      // Order important to preserve: _pending_yields >= _pending_decrements
-      Atomic::add(-decrement, &_pending_decrements);
-      Atomic::add(-decrement, &_pending_yields);
-      assert(_pending_decrements >= 0, "can't be negative");
-      assert(_pending_yields >= 0, "can't be negative");
-    }
-  }
   static bool should_yield()   { return _pending_yields > 0; }
-
-  // CMS incremental mode.
-  static void start_icms(); // notify thread to start a quantum of work
-  static void stop_icms();  // request thread to stop working
-  void icms_wait();         // if asked to stop, wait until notified to start
-
-  // Incremental mode is enabled globally by the flag CMSIncrementalMode.  It
-  // must also be enabled/disabled dynamically to allow foreground collections.
-#define ICMS_ENABLING_ASSERT                                                      \
-          assert((CMSIncrementalMode  && _icms_disabled >= 0) ||                  \
-                 (!CMSIncrementalMode && _icms_disabled <= 0), "Error")
-
-  static inline void enable_icms() {
-    ICMS_ENABLING_ASSERT;
-    Atomic::dec(&_icms_disabled);
-  }
-  static inline void disable_icms() {
-   ICMS_ENABLING_ASSERT;
-   Atomic::inc(&_icms_disabled);
-  }
-  static inline bool icms_is_disabled() {
-   ICMS_ENABLING_ASSERT;
-   return _icms_disabled > 0;
-  }
-  static inline bool icms_is_enabled() {
-   return !icms_is_disabled();
-  }
 };
 
-inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
-  if (CMSTraceThreadState) {
-    char buf[128];
-    TimeStamp& ts = gclog_or_tty->time_stamp();
-    if (!ts.is_updated()) {
-      ts.update();
-    }
-    jio_snprintf(buf, sizeof(buf), " [%.3f:  CMSThread %s] ",
-                 ts.seconds(), desc);
-    buf[sizeof(buf) - 1] = '\0';
-    gclog_or_tty->print("%s", buf);
-  }
-}
-
 // For scoped increment/decrement of (synchronous) yield requests
 class CMSSynchronousYieldRequest: public StackObj {
  public:
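
The synchronous protocol described above is normally used through the scoped CMSSynchronousYieldRequest declared here: construction registers a yield request and destruction retracts it. A hedged sketch of the idiom, with std::atomic standing in for HotSpot's Atomic class and the StackObj base omitted:

#include <atomic>

static std::atomic<int> pending_yields{0};  // stand-in for _pending_yields

class CMSSynchronousYieldRequest {
 public:
  // Ask the CMS thread to yield for the duration of this scope.
  CMSSynchronousYieldRequest()  { pending_yields.fetch_add(1); }
  ~CMSSynchronousYieldRequest() { pending_yields.fetch_sub(1); }
};

static bool should_yield() { return pending_yields.load() > 0; }

void young_gen_operation() {
  CMSSynchronousYieldRequest request;  // CMS yields while this is live
  // ... perform the young gen collection or direct allocation ...
}  // destructor retracts the request; CMS resumes once the count is 0
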
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -207,12 +207,6 @@
   MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
   assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
   if (gch->total_full_collections() == _full_gc_count_before) {
-    // Disable iCMS until the full collection is done, and
-    // remember that we did so.
-    CMSCollector::disable_icms();
-    _disabled_icms = true;
-    // In case CMS thread was in icms_wait(), wake it up.
-    CMSCollector::start_icms();
     // Nudge the CMS thread to start a concurrent collection.
     CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
   } else {
@@ -276,8 +270,4 @@
       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
     }
   }
-  // Enable iCMS back if we disabled it earlier.
-  if (_disabled_icms) {
-    CMSCollector::enable_icms();
-  }
 }
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -128,13 +128,11 @@
 // VM operation to invoke a concurrent collection of the heap as a
 // GenCollectedHeap heap.
 class VM_GenCollectFullConcurrent: public VM_GC_Operation {
-  bool _disabled_icms;
  public:
   VM_GenCollectFullConcurrent(unsigned int gc_count_before,
                               unsigned int full_gc_count_before,
                               GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
-      _disabled_icms(false)
+    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
   {
     assert(FullGCCount_lock != NULL, "Error");
     assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1248,7 +1248,7 @@
   // The same as above but assume that the caller holds the Heap_lock.
   void collect_locked(GCCause::Cause cause);
 
-  virtual void copy_allocation_context_stats(const jint* contexts,
+  virtual bool copy_allocation_context_stats(const jint* contexts,
                                              jlong* totals,
                                              jbyte* accuracy,
                                              jint len);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap_ext.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -25,8 +25,9 @@
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 
-void G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
+bool G1CollectedHeap::copy_allocation_context_stats(const jint* contexts,
                                                     jlong* totals,
                                                     jbyte* accuracy,
                                                     jint len) {
+  return false;
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1585,34 +1585,22 @@
   }
 };
 
+uint G1CollectorPolicy::calculate_parallel_work_chunk_size(uint n_workers, uint n_regions) {
+  assert(n_workers > 0, "Active gc workers should be greater than 0");
+  const uint overpartition_factor = 4;
+  const uint min_chunk_size = MAX2(n_regions / n_workers, 1U);
+  return MAX2(n_regions / (n_workers * overpartition_factor), min_chunk_size);
+}
+
 void
-G1CollectorPolicy::record_concurrent_mark_cleanup_end(int no_of_gc_threads) {
+G1CollectorPolicy::record_concurrent_mark_cleanup_end(uint n_workers) {
   _collectionSetChooser->clear();
 
-  uint region_num = _g1->num_regions();
-  const uint OverpartitionFactor = 4;
-  uint WorkUnit;
-  // The use of MinChunkSize = 8 in the original code
-  // causes some assertion failures when the total number of
-  // region is less than 8.  The code here tries to fix that.
-  // Should the original code also be fixed?
-  if (no_of_gc_threads > 0) {
-    const uint MinWorkUnit = MAX2(region_num / no_of_gc_threads, 1U);
-    WorkUnit = MAX2(region_num / (no_of_gc_threads * OverpartitionFactor),
-                    MinWorkUnit);
-  } else {
-    assert(no_of_gc_threads > 0,
-      "The active gc workers should be greater than 0");
-    // In a product build do something reasonable to avoid a crash.
-    const uint MinWorkUnit = MAX2(region_num / (uint) ParallelGCThreads, 1U);
-    WorkUnit =
-      MAX2(region_num / (uint) (ParallelGCThreads * OverpartitionFactor),
-           MinWorkUnit);
-  }
-  _collectionSetChooser->prepare_for_par_region_addition(_g1->num_regions(),
-                                                         WorkUnit);
-  ParKnownGarbageTask parKnownGarbageTask(_collectionSetChooser, WorkUnit, (uint) no_of_gc_threads);
-  _g1->workers()->run_task(&parKnownGarbageTask);
+  uint n_regions = _g1->num_regions();
+  uint chunk_size = calculate_parallel_work_chunk_size(n_workers, n_regions);
+  _collectionSetChooser->prepare_for_par_region_addition(n_regions, chunk_size);
+  ParKnownGarbageTask par_known_garbage_task(_collectionSetChooser, chunk_size, n_workers);
+  _g1->workers()->run_task(&par_known_garbage_task);
 
   _collectionSetChooser->sort_regions();
 
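
A quick check of the extracted helper's arithmetic (a worked example, not part of the patch): with 100 regions and 8 workers, min_chunk_size = MAX2(100 / 8, 1) = 12 while the overpartitioned value is 100 / (8 * 4) = 3, so the returned chunk size is 12. Restated in plain C++ for illustration:

#include <algorithm>
#include <cassert>

unsigned chunk_size(unsigned n_workers, unsigned n_regions) {
  const unsigned overpartition_factor = 4;  // as in the patch
  const unsigned min_chunk = std::max(n_regions / n_workers, 1u);
  return std::max(n_regions / (n_workers * overpartition_factor), min_chunk);
}

int main() {
  assert(chunk_size(8, 100) == 12);  // 100/8 = 12 dominates 100/32 = 3
  assert(chunk_size(8, 4) == 1);     // fewer regions than workers -> 1
}
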
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -612,6 +612,10 @@
                                           uint desired_min_length,
                                           uint desired_max_length);
 
+  // Calculate and return chunk size (in number of regions) for parallel
+  // concurrent mark cleanup.
+  uint calculate_parallel_work_chunk_size(uint n_workers, uint n_regions);
+
   // Check whether a given young length (young_length) fits into the
   // given target pause time and whether the prediction for the amount
   // of objects to be copied for the given length will fit into the
@@ -687,7 +691,7 @@
 
   // Record start, end, and completion of cleanup.
   void record_concurrent_mark_cleanup_start();
-  void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
+  void record_concurrent_mark_cleanup_end(uint n_workers);
   void record_concurrent_mark_cleanup_completed();
 
   // Records the information about the heap size for reporting in
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -97,13 +97,6 @@
   FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl, mtGC);
 }
 
-void CountNonCleanMemRegionClosure::do_MemRegion(MemRegion mr) {
-  if (_g1->is_in_g1_reserved(mr.start())) {
-    _n += (int) ((mr.byte_size() / CardTableModRefBS::card_size));
-    if (_start_first == NULL) _start_first = mr.start();
-  }
-}
-
 class ScanRSClosure : public HeapRegionClosure {
   size_t _cards_done, _cards;
   G1CollectedHeap* _g1h;
@@ -303,15 +296,6 @@
 
   _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
 
-  // Now there should be no dirty cards.
-  if (G1RSLogCheckCardTable) {
-    CountNonCleanMemRegionClosure cl(_g1);
-    _ct_bs->mod_card_iterate(&cl);
-    // XXX This isn't true any more: keeping cards of young regions
-    // marked dirty broke it.  Need some reasonable fix.
-    guarantee(cl.n() == 0, "Card table should be clean.");
-  }
-
   _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
 }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -151,19 +151,6 @@
   }
 };
 
-class CountNonCleanMemRegionClosure: public MemRegionClosure {
-  G1CollectedHeap* _g1;
-  int _n;
-  HeapWord* _start_first;
-public:
-  CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
-    _g1(g1), _n(0), _start_first(NULL)
-  {}
-  void do_MemRegion(MemRegion mr);
-  int n() { return _n; };
-  HeapWord* start_first() { return _start_first; }
-};
-
 class UpdateRSOopClosure: public ExtendedOopClosure {
   HeapRegion* _from;
   G1RemSet* _rs;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1_globals.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -108,10 +108,6 @@
   develop(bool, G1RSBarrierRegionFilter, true,                              \
           "If true, generate region filtering code in RS barrier")          \
                                                                             \
-  develop(bool, G1RSLogCheckCardTable, false,                               \
-          "If true, verify that no dirty cards remain after RS log "        \
-          "processing.")                                                    \
-                                                                            \
   diagnostic(bool, G1PrintRegionLivenessInfo, false,                        \
             "Prints the liveness information for all regions in the heap "  \
             "at the end of a marking cycle.")                               \
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -960,6 +960,10 @@
   verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
 }
 
+void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
+  scan_and_forward(this, cp);
+}
+
 // G1OffsetTableContigSpace code; copied from space.cpp.  Hope this can go
 // away eventually.
 
@@ -1043,12 +1047,6 @@
   }
 }
 
-#define block_is_always_obj(q) true
-void G1OffsetTableContigSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, block_size);
-}
-#undef block_is_always_obj
-
 G1OffsetTableContigSpace::
 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                          MemRegion mr) :
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -187,8 +187,6 @@
   HeapWord* block_start(const void* p);
   HeapWord* block_start_const(const void* p) const;
 
-  void prepare_for_compaction(CompactPoint* cp);
-
   // Add offset table update.
   virtual HeapWord* allocate(size_t word_size);
   HeapWord* par_allocate(size_t word_size);
@@ -210,6 +208,9 @@
 
 class HeapRegion: public G1OffsetTableContigSpace {
   friend class VMStructs;
+  // Allow scan_and_forward to call (private) overrides for auxiliary functions on this class
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
  private:
 
   // The remembered set for this region.
@@ -219,6 +220,20 @@
 
   G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
 
+  // Auxiliary functions for scan_and_forward support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return HeapRegion::block_size(addr); // Avoid virtual call
+  }
+
  protected:
   // The index of this region in the heap region sequence.
   uint  _hrm_index;
@@ -340,6 +355,9 @@
   // and the amount of unallocated words if called on top()
   size_t block_size(const HeapWord* p) const;
 
+  // Override for scan_and_forward support.
+  void prepare_for_compaction(CompactPoint* cp);
+
   inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
   inline HeapWord* allocate_no_bot_updates(size_t word_size);
 
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -426,11 +426,19 @@
                                          mtGC);
   G1BlockOffsetSharedArray oa(heap, bot_storage);
   bot_storage->commit_regions(0, num_regions_in_test);
-  HeapRegion hr0(0, &oa, heap);
-  HeapRegion hr1(1, &oa, heap);
-  HeapRegion hr2(2, &oa, heap);
-  HeapRegion hr3(3, &oa, heap);
-  HeapRegion hr4(4, &oa, heap);
+
+  // Set up memory regions for the heap regions.
+  MemRegion mr0(heap.start(), HeapRegion::GrainWords);
+  MemRegion mr1(mr0.end(), HeapRegion::GrainWords);
+  MemRegion mr2(mr1.end(), HeapRegion::GrainWords);
+  MemRegion mr3(mr2.end(), HeapRegion::GrainWords);
+  MemRegion mr4(mr3.end(), HeapRegion::GrainWords);
+
+  HeapRegion hr0(0, &oa, mr0);
+  HeapRegion hr1(1, &oa, mr1);
+  HeapRegion hr2(2, &oa, mr2);
+  HeapRegion hr3(3, &oa, mr3);
+  HeapRegion hr4(4, &oa, mr4);
   l.add_ordered(&hr1);
   l.add_ordered(&hr0);
   l.add_ordered(&hr3);
--- a/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_implementation/shared/markSweep.inline.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_interface/collectedHeap.hpp"
+#include "oops/markOop.inline.hpp"
 #include "utilities/stack.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -644,10 +644,13 @@
   // For each context in contexts, set the corresponding entries in the totals
   // and accuracy arrays to the current values held by the statistics.  Each
   // array should be of length len.
-  virtual void copy_allocation_context_stats(const jint* contexts,
+  // Returns true if there are more stats available.
+  virtual bool copy_allocation_context_stats(const jint* contexts,
                                              jlong* totals,
                                              jbyte* accuracy,
-                                             jint len) { }
+                                             jint len) {
+    return false;
+  }
 
   /////////////// Unit tests ///////////////
 
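The widened return type gives callers a way to pull statistics in batches. A hypothetical caller, with ToyHeap and the loop shape invented for this sketch (the real signature uses jint/jlong/jbyte arrays of length len):

#include <cstdint>

struct ToyHeap {
  // Mirrors the default above: copies nothing further, reports done.
  virtual bool copy_allocation_context_stats(const int32_t* /*contexts*/,
                                             int64_t* /*totals*/,
                                             int8_t* /*accuracy*/,
                                             int32_t /*len*/) {
    return false;
  }
  virtual ~ToyHeap() {}
};

void drain_context_stats(ToyHeap* heap, const int32_t* contexts,
                         int64_t* totals, int8_t* accuracy, int32_t len) {
  bool more;
  do {
    more = heap->copy_allocation_context_stats(contexts, totals, accuracy, len);
    // ... consume this batch of totals/accuracy ...
  } while (more);  // keep going while the heap reports more stats
}
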
--- a/hotspot/src/share/vm/interpreter/bytecodes.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/interpreter/bytecodes.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -401,8 +401,10 @@
   static bool        is_astore      (Code code)    { return (code == _astore || code == _astore_0 || code == _astore_1
                                                                              || code == _astore_2 || code == _astore_3); }
 
+  static bool        is_const       (Code code)    { return (_aconst_null <= code && code <= _ldc2_w); }
   static bool        is_zero_const  (Code code)    { return (code == _aconst_null || code == _iconst_0
                                                            || code == _fconst_0 || code == _dconst_0); }
+  static bool        is_return      (Code code)    { return (_ireturn <= code && code <= _return); }
   static bool        is_invoke      (Code code)    { return (_invokevirtual <= code && code <= _invokedynamic); }
   static bool        has_receiver   (Code code)    { assert(is_invoke(code), "");  return code == _invokevirtual ||
                                                                                           code == _invokespecial ||
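
The new is_const and is_return predicates rely on JVM opcodes occupying contiguous numeric ranges (the constant-pushing bytecodes run from aconst_null, 0x01, through ldc2_w, 0x14; the returns from ireturn, 0xac, through return, 0xb1), so each classifier is just two comparisons. A reduced sketch of the idiom:

#include <cassert>

// Opcode values per the JVM specification; the enum is a small
// stand-in for the much larger Bytecodes::Code.
enum Code { _ireturn = 0xac, _lreturn, _freturn, _dreturn, _areturn, _return };

static bool is_return(Code code) { return _ireturn <= code && code <= _return; }

int main() {
  assert(is_return(_areturn));                   // 0xb0 is inside the range
  assert(!is_return(static_cast<Code>(0x10)));   // 0x10 (bipush) is not
}
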
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -189,11 +189,6 @@
     return CollectorPolicy::CollectorPolicyKind;
   }
 
-  // Returns true if a collector has eden space with soft end.
-  virtual bool has_soft_ended_eden() {
-    return false;
-  }
-
   // Do any updates required to global flags that are due to heap initialization
   // changes
   virtual void post_heap_initialize() = 0;
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -194,11 +194,7 @@
                 (HeapWord*)_virtual_space.high());
   Universe::heap()->barrier_set()->resize_covered_region(cmr);
 
-  if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
-    _eden_space = new ConcEdenSpace(this);
-  } else {
-    _eden_space = new EdenSpace(this);
-  }
+  _eden_space = new ContiguousSpace();
   _from_space = new ContiguousSpace();
   _to_space   = new ContiguousSpace();
 
@@ -1038,38 +1034,12 @@
     if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
       _next_gen->sample_eden_chunk();
     }
-    return result;
-  }
-  do {
-    HeapWord* old_limit = eden()->soft_end();
-    if (old_limit < eden()->end()) {
-      // Tell the next generation we reached a limit.
-      HeapWord* new_limit =
-        next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
-      if (new_limit != NULL) {
-        Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
-      } else {
-        assert(eden()->soft_end() == eden()->end(),
-               "invalid state after allocation_limit_reached returned null");
-      }
-    } else {
-      // The allocation failed and the soft limit is equal to the hard limit,
-      // there are no reasons to do an attempt to allocate
-      assert(old_limit == eden()->end(), "sanity check");
-      break;
-    }
-    // Try to allocate until succeeded or the soft limit can't be adjusted
-    result = eden()->par_allocate(word_size);
-  } while (result == NULL);
-
-  // If the eden is full and the last collection bailed out, we are running
-  // out of heap space, and we try to allocate the from-space, too.
-  // allocate_from_space can't be inlined because that would introduce a
-  // circular dependency at compile time.
-  if (result == NULL) {
+  } else {
+    // If the eden is full and the last collection bailed out, we are running
+    // out of heap space, and we try to allocate the from-space, too.
+    // allocate_from_space can't be inlined because that would introduce a
+    // circular dependency at compile time.
     result = allocate_from_space(word_size);
-  } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
-    _next_gen->sample_eden_chunk();
   }
   return result;
 }
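
With the soft-end retry loop gone, eden allocation reduces to a single lock-free bump-pointer attempt followed by the from-space fallback. A sketch of the bump-pointer part under that assumption, with std::atomic standing in for HotSpot's Atomic::cmpxchg_ptr and a toy byte-based space in place of the real HeapWord arithmetic:

#include <atomic>
#include <cstddef>

struct ToyEden {
  std::atomic<char*> top;   // next free byte
  char* end;                // hard limit (no soft end anymore)

  char* par_allocate(size_t bytes) {
    char* old_top = top.load();
    do {
      if (end - old_top < (std::ptrdiff_t)bytes) {
        return nullptr;  // eden full; caller falls back to from-space
      }
    } while (!top.compare_exchange_weak(old_top, old_top + bytes));
    return old_top;  // start of the newly claimed block
  }
};

int main() {
  static char buf[1024];
  ToyEden eden{{buf}, buf + sizeof(buf)};
  char* p = eden.par_allocate(64);  // first allocation lands at buf
  (void)p;
}
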
@@ -1083,11 +1053,6 @@
   return res;
 }
 
-void DefNewGeneration::gc_prologue(bool full) {
-  // Ensure that _end and _soft_end are the same in eden space.
-  eden()->set_soft_end(eden()->end());
-}
-
 size_t DefNewGeneration::tlab_capacity() const {
   return eden()->capacity();
 }
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -32,7 +32,6 @@
 #include "memory/generation.inline.hpp"
 #include "utilities/stack.hpp"
 
-class EdenSpace;
 class ContiguousSpace;
 class ScanClosure;
 class STWGCTimer;
@@ -132,7 +131,7 @@
   void adjust_desired_tenuring_threshold();
 
   // Spaces
-  EdenSpace*       _eden_space;
+  ContiguousSpace* _eden_space;
   ContiguousSpace* _from_space;
   ContiguousSpace* _to_space;
 
@@ -214,9 +213,9 @@
   virtual Generation::Name kind() { return Generation::DefNew; }
 
   // Accessing spaces
-  EdenSpace*       eden() const           { return _eden_space; }
-  ContiguousSpace* from() const           { return _from_space;  }
-  ContiguousSpace* to()   const           { return _to_space;    }
+  ContiguousSpace* eden() const           { return _eden_space; }
+  ContiguousSpace* from() const           { return _from_space; }
+  ContiguousSpace* to()   const           { return _to_space;   }
 
   virtual CompactibleSpace* first_compaction_space() const;
 
@@ -282,8 +281,6 @@
 
   HeapWord* par_allocate(size_t word_size, bool is_tlab);
 
-  // Prologue & Epilogue
-  virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
   // Save the tops for eden, from, and to
--- a/hotspot/src/share/vm/memory/generation.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/generation.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -265,14 +265,6 @@
   // Like "allocate", but performs any necessary locking internally.
   virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
 
-  // A 'younger' gen has reached an allocation limit, and uses this to notify
-  // the next older gen.  The return value is a new limit, or NULL if none.  The
-  // caller must do the necessary locking.
-  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
-                                             size_t word_size) {
-    return NULL;
-  }
-
   // Some generation may offer a region for shared, contiguous allocation,
   // via inlined code (by exporting the address of the top and end fields
   // defining the extent of the contiguous allocation region.)
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -715,15 +715,17 @@
     if (class_list_path_len >= 3) {
       if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
         if (class_list_path_len < JVM_MAXPATHLEN - 4) {
-          strncat(class_list_path_str, os::file_separator(), 1);
-          strncat(class_list_path_str, "lib", 3);
+          jio_snprintf(class_list_path_str + class_list_path_len,
+                       sizeof(class_list_path_str) - class_list_path_len,
+                       "%slib", os::file_separator());
+          class_list_path_len += 4;
         }
       }
     }
-    class_list_path_len = (int)strlen(class_list_path_str);
     if (class_list_path_len < JVM_MAXPATHLEN - 10) {
-      strncat(class_list_path_str, os::file_separator(), 1);
-      strncat(class_list_path_str, "classlist", 9);
+      jio_snprintf(class_list_path_str + class_list_path_len,
+                   sizeof(class_list_path_str) - class_list_path_len,
+                   "%sclasslist", os::file_separator());
     }
     class_list_path = class_list_path_str;
   } else {
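
The replaced calls were misusing strncat: its third argument caps how many
characters are appended from the *source*, not how much room remains in the
destination, so strncat(dst, sep, 1) provides no destination bound of its own.
jio_snprintf (HotSpot's snprintf wrapper) is bounded by the space actually
remaining. A standalone sketch of the same pattern with plain snprintf (names
and buffer size are hypothetical, not HotSpot code):

    #include <cstdio>
    #include <cstring>

    // Appends sep + name to buf without ever writing past its capacity.
    // 'buf' must be a true array so sizeof() reports its capacity.
    static void append_path_component(char (&buf)[260],
                                      const char* sep, const char* name) {
      size_t len = std::strlen(buf);
      if (len < sizeof(buf)) {
        // Bounded by the remaining space, unlike strncat's source-length cap.
        std::snprintf(buf + len, sizeof(buf) - len, "%s%s", sep, name);
      }
    }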
@@ -851,7 +853,7 @@
     ik->link_class(THREAD);
     if (HAS_PENDING_EXCEPTION) {
       ResourceMark rm;
-      tty->print_cr("Preload Error: Verification failed for %s",
+      tty->print_cr("Preload Warning: Verification failed for %s",
                     ik->external_name());
       CLEAR_PENDING_EXCEPTION;
       ik->set_in_error_state();
--- a/hotspot/src/share/vm/memory/space.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/space.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -438,52 +438,8 @@
   }
 }
 
-#define block_is_always_obj(q) true
-#define obj_size(q) oop(q)->size()
-#define adjust_obj_size(s) s
-
-void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
-}
-
-// Faster object search.
 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
-  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
-}
-
-void Space::adjust_pointers() {
-  // adjust all the interior pointers to point at the new locations of objects
-  // Used by MarkSweep::mark_sweep_phase3()
-
-  // First check to see if there is any work to be done.
-  if (used() == 0) {
-    return;  // Nothing to do.
-  }
-
-  // Otherwise...
-  HeapWord* q = bottom();
-  HeapWord* t = end();
-
-  debug_only(HeapWord* prev_q = NULL);
-  while (q < t) {
-    if (oop(q)->is_gc_marked()) {
-      // q is alive
-
-      // point all the oops to the new location
-      size_t size = oop(q)->adjust_pointers();
-
-      debug_only(prev_q = q);
-
-      q += size;
-    } else {
-      // q is not a live object.  But we're not in a compactible space,
-      // So we don't have live ranges.
-      debug_only(prev_q = q);
-      q += block_size(q);
-      assert(q > prev_q, "we should be moving forward through memory");
-    }
-  }
-  assert(q == t, "just checking");
+  scan_and_forward(this, cp);
 }
 
 void CompactibleSpace::adjust_pointers() {
@@ -492,11 +448,11 @@
     return;   // Nothing to do.
   }
 
-  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
+  scan_and_adjust_pointers(this);
 }
 
 void CompactibleSpace::compact() {
-  SCAN_AND_COMPACT(obj_size);
+  scan_and_compact(this);
 }
 
 void Space::print_short() const { print_short_on(tty); }
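
The pattern here: the old SCAN_AND_* macros are re-expressed as function
templates (defined in space.inline.hpp below) that take the concrete space
type as a template parameter, so helper calls such as scanned_block_size()
are bound at compile time instead of being textually pasted by the
preprocessor. A toy, self-contained illustration of the dispatch (types and
names are made up, not HotSpot's):

    #include <cstddef>

    struct ToySpace {
      const char* _bottom;
      const char* _top;
      const char* bottom() const     { return _bottom; }
      const char* scan_limit() const { return _top; }
      size_t scanned_block_size(const char*) const { return 1; }  // fixed-size blocks
    };

    // Same shape as scan_and_forward: one template, statically bound helpers,
    // no virtual dispatch inside the hot loop.
    template <class SpaceType>
    size_t scan_all_blocks(SpaceType* space) {
      size_t n = 0;
      for (const char* q = space->bottom(); q < space->scan_limit(); ) {
        q += space->scanned_block_size(q);  // resolved per instantiation
        ++n;
      }
      return n;
    }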
@@ -684,13 +640,12 @@
 }
 
 // This version requires locking.
-inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
-                                                HeapWord* const end_value) {
+inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
   assert(Heap_lock->owned_by_self() ||
          (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
          "not locked");
   HeapWord* obj = top();
-  if (pointer_delta(end_value, obj) >= size) {
+  if (pointer_delta(end(), obj) >= size) {
     HeapWord* new_top = obj + size;
     set_top(new_top);
     assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
@@ -701,11 +656,10 @@
 }
 
 // This version is lock-free.
-inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
-                                                    HeapWord* const end_value) {
+inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
   do {
     HeapWord* obj = top();
-    if (pointer_delta(end_value, obj) >= size) {
+    if (pointer_delta(end(), obj) >= size) {
       HeapWord* new_top = obj + size;
       HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
       // result can be one of two:
@@ -744,12 +698,12 @@
 
 // Requires locking.
 HeapWord* ContiguousSpace::allocate(size_t size) {
-  return allocate_impl(size, end());
+  return allocate_impl(size);
 }
 
 // Lock-free.
 HeapWord* ContiguousSpace::par_allocate(size_t size) {
-  return par_allocate_impl(size, end());
+  return par_allocate_impl(size);
 }
 
 void ContiguousSpace::allocate_temporary_filler(int factor) {
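
par_allocate_impl (now always bounded by end()) is the classic lock-free
bump-pointer loop: read top, check that the request fits, CAS top forward,
and retry if another thread won the race. The same idea restated as a
self-contained sketch with std::atomic in place of Atomic::cmpxchg_ptr
(an illustration, not the HotSpot implementation):

    #include <atomic>
    #include <cstddef>

    struct BumpArena {
      std::atomic<char*> _top;
      char*              _end;

      // Lock-free allocation: returns nullptr when the arena is full.
      char* par_allocate(size_t size) {
        char* obj = _top.load(std::memory_order_relaxed);
        while (static_cast<size_t>(_end - obj) >= size) {
          // On failure, compare_exchange_weak reloads obj with the current
          // top, and the loop re-checks the remaining capacity.
          if (_top.compare_exchange_weak(obj, obj + size)) {
            return obj;  // CAS published the new top; [obj, obj+size) is ours
          }
        }
        return nullptr;
      }
    };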
@@ -784,49 +738,6 @@
   }
 }
 
-void EdenSpace::clear(bool mangle_space) {
-  ContiguousSpace::clear(mangle_space);
-  set_soft_end(end());
-}
-
-// Requires locking.
-HeapWord* EdenSpace::allocate(size_t size) {
-  return allocate_impl(size, soft_end());
-}
-
-// Lock-free.
-HeapWord* EdenSpace::par_allocate(size_t size) {
-  return par_allocate_impl(size, soft_end());
-}
-
-HeapWord* ConcEdenSpace::par_allocate(size_t size)
-{
-  do {
-    // The invariant is top() should be read before end() because
-    // top() can't be greater than end(), so if an update of _soft_end
-    // occurs between 'end_val = end();' and 'top_val = top();' top()
-    // also can grow up to the new end() and the condition
-    // 'top_val > end_val' is true. To ensure the loading order
-    // OrderAccess::loadload() is required after top() read.
-    HeapWord* obj = top();
-    OrderAccess::loadload();
-    if (pointer_delta(*soft_end_addr(), obj) >= size) {
-      HeapWord* new_top = obj + size;
-      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
-      // result can be one of two:
-      //  the old top value: the exchange succeeded
-      //  otherwise: the new value of the top is returned.
-      if (result == obj) {
-        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
-        return obj;
-      }
-    } else {
-      return NULL;
-    }
-  } while (true);
-}
-
-
 HeapWord* OffsetTableContigSpace::initialize_threshold() {
   return _offsets.initialize_threshold();
 }
--- a/hotspot/src/share/vm/memory/space.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/space.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -41,19 +41,6 @@
 // implementations for keeping track of free and used space,
 // for iterating over objects and free blocks, etc.
 
-// Here's the Space hierarchy:
-//
-// - Space               -- an abstract base class describing a heap area
-//   - CompactibleSpace  -- a space supporting compaction
-//     - CompactibleFreeListSpace -- (used for CMS generation)
-//     - ContiguousSpace -- a compactible space in which all free space
-//                          is contiguous
-//       - EdenSpace     -- contiguous space used as nursery
-//         - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
-//       - OffsetTableContigSpace -- contiguous space with a block offset array
-//                          that allows "fast" block_start calls
-//         - TenuredSpace -- (used for TenuredGeneration)
-
 // Forward decls.
 class Space;
 class BlockOffsetArray;
@@ -238,7 +225,7 @@
 
   // Mark-sweep-compact support: all spaces can update pointers to objects
   // moving as a part of compaction.
-  virtual void adjust_pointers();
+  virtual void adjust_pointers() = 0;
 
   // PrintHeapAtGC support
   virtual void print() const;
@@ -339,7 +326,36 @@
 // necessarily, a space that is normally contiguous.  But, for example, a
 // free-list-based space whose normal collection is a mark-sweep without
 // compaction could still support compaction in full GC's.
-
+//
+// The compaction operations are implemented by the
+// scan_and_{adjust_pointers,compact,forward} function templates.
+// The following non-virtual auxiliary functions are used by these function templates:
+// - scan_limit()
+// - scanned_block_is_obj()
+// - scanned_block_size()
+// - adjust_obj_size()
+// - obj_size()
+// These functions are to be used exclusively by the scan_and_* function templates,
+// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
+//
+// NOTE: Any subclass of CompactibleSpace that wants to change or define the
+// behavior of any of the auxiliary functions must also override the
+// corresponding prepare_for_compaction/adjust_pointers/compact functions that
+// use them. Otherwise, such changes will not take effect during the compaction operations.
+//
+// This translates to the following dependencies:
+// Overrides/definitions of
+//  - scan_limit
+//  - scanned_block_is_obj
+//  - scanned_block_size
+// require override/definition of prepare_for_compaction().
+// Similar dependencies exist between
+//  - adjust_obj_size  and adjust_pointers()
+//  - obj_size         and compact().
+//
+// This also means that changes to block_size() or block_is_obj() that
+// should be effective during the compaction operations must provide a corresponding
+// definition of scanned_block_size/scanned_block_is_obj respectively.
 class CompactibleSpace: public Space {
   friend class VMStructs;
   friend class CompactibleFreeListSpace;
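
To make the override rule stated in the comment above concrete, here is a
hypothetical (non-HotSpot) subclass that redefines the scan_and_forward
helpers; note that it must also override prepare_for_compaction(), otherwise
the template would still be instantiated with the base class's helpers:

    class MySpace : public CompactibleSpace {
      template <typename SpaceType>
      friend void CompactibleSpace::scan_and_forward(SpaceType*, CompactPoint*);

      // Redefinitions picked up statically by scan_and_forward<MySpace>.
      HeapWord* scan_limit() const { return end(); }
      bool scanned_block_is_obj(const HeapWord* addr) const { return block_is_obj(addr); }
      size_t scanned_block_size(const HeapWord* addr) const { return block_size(addr); }

     public:
      // Without this override, the helpers above would be dead code: the base
      // instantiation would keep using the base class's versions.
      virtual void prepare_for_compaction(CompactPoint* cp) {
        scan_and_forward(this, cp);  // 'this' is statically MySpace*
      }
    };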
@@ -347,6 +363,15 @@
   HeapWord* _compaction_top;
   CompactibleSpace* _next_compaction_space;
 
+  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
+  inline size_t adjust_obj_size(size_t size) const {
+    return size;
+  }
+
+  inline size_t obj_size(const HeapWord* addr) const {
+    return oop(addr)->size();
+  }
+
 public:
   CompactibleSpace() :
    _compaction_top(NULL), _next_compaction_space(NULL) {}
@@ -390,7 +415,7 @@
   // "cp->compaction_space" up-to-date.  Offset tables may be updated in
   // this phase as if the final copy had occurred; if so, "cp->threshold"
   // indicates when the next such action should be taken.
-  virtual void prepare_for_compaction(CompactPoint* cp);
+  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
   // MarkSweep support phase3
   virtual void adjust_pointers();
   // MarkSweep support phase4
@@ -449,6 +474,25 @@
   // words remaining after this operation.
   bool insert_deadspace(size_t& allowed_deadspace_words, HeapWord* q,
                         size_t word_len);
+
+  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
+  // The space argument should be a subclass of CompactibleSpace, implementing
+  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
+  // and possibly also overriding obj_size() and adjust_obj_size().
+  // These functions should avoid virtual calls whenever possible.
+
+  // Frequently calls adjust_obj_size().
+  template <class SpaceType>
+  static inline void scan_and_adjust_pointers(SpaceType* space);
+
+  // Frequently calls obj_size().
+  template <class SpaceType>
+  static inline void scan_and_compact(SpaceType* space);
+
+  // Frequently calls scanned_block_is_obj() and scanned_block_size().
+  // Requires the scan_limit() function.
+  template <class SpaceType>
+  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
 };
 
 class GenSpaceMangler;
@@ -458,6 +502,25 @@
 class ContiguousSpace: public CompactibleSpace {
   friend class OneContigSpaceCardGeneration;
   friend class VMStructs;
+  // Allow the scan_and_forward function template to call this class's private overrides of the auxiliary functions
+  template <typename SpaceType>
+  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
+
+ private:
+  // Auxiliary functions for scan_and_forward support.
+  // See comments for CompactibleSpace for more information.
+  inline HeapWord* scan_limit() const {
+    return top();
+  }
+
+  inline bool scanned_block_is_obj(const HeapWord* addr) const {
+    return true; // Always true, since scan_limit is top
+  }
+
+  inline size_t scanned_block_size(const HeapWord* addr) const {
+    return oop(addr)->size();
+  }
+
  protected:
   HeapWord* _top;
   HeapWord* _concurrent_iteration_safe_limit;
@@ -467,8 +530,8 @@
   GenSpaceMangler* mangler() { return _mangler; }
 
   // Allocation helpers (return NULL if full).
-  inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
-  inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+  inline HeapWord* allocate_impl(size_t word_size);
+  inline HeapWord* par_allocate_impl(size_t word_size);
 
  public:
   ContiguousSpace();
@@ -622,7 +685,6 @@
   // Used to increase collection frequency.  "factor" of 0 means entire
   // space.
   void allocate_temporary_filler(int factor);
-
 };
 
 
@@ -685,56 +747,6 @@
   {}
 };
 
-
-// Class EdenSpace describes eden-space in new generation.
-
-class DefNewGeneration;
-
-class EdenSpace : public ContiguousSpace {
-  friend class VMStructs;
- private:
-  DefNewGeneration* _gen;
-
-  // _soft_end is used as a soft limit on allocation.  As soft limits are
-  // reached, the slow-path allocation code can invoke other actions and then
-  // adjust _soft_end up to a new soft limit or to end().
-  HeapWord* _soft_end;
-
- public:
-  EdenSpace(DefNewGeneration* gen) :
-   _gen(gen), _soft_end(NULL) {}
-
-  // Get/set just the 'soft' limit.
-  HeapWord* soft_end()               { return _soft_end; }
-  HeapWord** soft_end_addr()         { return &_soft_end; }
-  void set_soft_end(HeapWord* value) { _soft_end = value; }
-
-  // Override.
-  void clear(bool mangle_space);
-
-  // Set both the 'hard' and 'soft' limits (_end and _soft_end).
-  void set_end(HeapWord* value) {
-    set_soft_end(value);
-    ContiguousSpace::set_end(value);
-  }
-
-  // Allocation (return NULL if full)
-  HeapWord* allocate(size_t word_size);
-  HeapWord* par_allocate(size_t word_size);
-};
-
-// Class ConcEdenSpace extends EdenSpace for the sake of safe
-// allocation while soft-end is being modified concurrently
-
-class ConcEdenSpace : public EdenSpace {
- public:
-  ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
-
-  // Allocation (return NULL if full)
-  HeapWord* par_allocate(size_t word_size);
-};
-
-
 // A ContigSpace that supports an efficient "block_start" operation via
 // a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
 // other spaces.)  This is the abstract base class for old generation
--- a/hotspot/src/share/vm/memory/space.inline.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/memory/space.inline.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -25,6 +25,9 @@
 #ifndef SHARE_VM_MEMORY_SPACE_INLINE_HPP
 #define SHARE_VM_MEMORY_SPACE_INLINE_HPP
 
+#include "gc_implementation/shared/liveRange.hpp"
+#include "gc_implementation/shared/markSweep.inline.hpp"
+#include "gc_implementation/shared/spaceDecorator.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/universe.hpp"
@@ -35,272 +38,6 @@
   return block_start_const(p);
 }
 
-#define SCAN_AND_FORWARD(cp,scan_limit,block_is_obj,block_size) {            \
-  /* Compute the new addresses for the live objects and store it in the mark \
-   * Used by universe::mark_sweep_phase2()                                   \
-   */                                                                        \
-  HeapWord* compact_top; /* This is where we are currently compacting to. */ \
-                                                                             \
-  /* We're sure to be here before any objects are compacted into this        \
-   * space, so this is a good time to initialize this:                       \
-   */                                                                        \
-  set_compaction_top(bottom());                                              \
-                                                                             \
-  if (cp->space == NULL) {                                                   \
-    assert(cp->gen != NULL, "need a generation");                            \
-    assert(cp->threshold == NULL, "just checking");                          \
-    assert(cp->gen->first_compaction_space() == this, "just checking");      \
-    cp->space = cp->gen->first_compaction_space();                           \
-    compact_top = cp->space->bottom();                                       \
-    cp->space->set_compaction_top(compact_top);                              \
-    cp->threshold = cp->space->initialize_threshold();                       \
-  } else {                                                                   \
-    compact_top = cp->space->compaction_top();                               \
-  }                                                                          \
-                                                                             \
-  /* We allow some amount of garbage towards the bottom of the space, so     \
-   * we don't start compacting before there is a significant gain to be made.\
-   * Occasionally, we want to ensure a full compaction, which is determined  \
-   * by the MarkSweepAlwaysCompactCount parameter.                           \
-   */                                                                        \
-  uint invocations = MarkSweep::total_invocations();                         \
-  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);       \
-                                                                             \
-  size_t allowed_deadspace = 0;                                              \
-  if (skip_dead) {                                                           \
-    const size_t ratio = allowed_dead_ratio();                               \
-    allowed_deadspace = (capacity() * ratio / 100) / HeapWordSize;           \
-  }                                                                          \
-                                                                             \
-  HeapWord* q = bottom();                                                    \
-  HeapWord* t = scan_limit();                                                \
-                                                                             \
-  HeapWord*  end_of_live= q;    /* One byte beyond the last byte of the last \
-                                   live object. */                           \
-  HeapWord*  first_dead = end();/* The first dead object. */                 \
-  LiveRange* liveRange  = NULL; /* The current live range, recorded in the   \
-                                   first header of preceding free area. */   \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  const intx interval = PrefetchScanIntervalInBytes;                         \
-                                                                             \
-  while (q < t) {                                                            \
-    assert(!block_is_obj(q) ||                                               \
-           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||   \
-           oop(q)->mark()->has_bias_pattern(),                               \
-           "these are the only valid states during a mark sweep");           \
-    if (block_is_obj(q) && oop(q)->is_gc_marked()) {                         \
-      /* prefetch beyond q */                                                \
-      Prefetch::write(q, interval);                                          \
-      size_t size = block_size(q);                                           \
-      compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
-      q += size;                                                             \
-      end_of_live = q;                                                       \
-    } else {                                                                 \
-      /* run over all the contiguous dead objects */                         \
-      HeapWord* end = q;                                                     \
-      do {                                                                   \
-        /* prefetch beyond end */                                            \
-        Prefetch::write(end, interval);                                      \
-        end += block_size(end);                                              \
-      } while (end < t && (!block_is_obj(end) || !oop(end)->is_gc_marked()));\
-                                                                             \
-      /* see if we might want to pretend this object is alive so that        \
-       * we don't have to compact quite as often.                            \
-       */                                                                    \
-      if (allowed_deadspace > 0 && q == compact_top) {                       \
-        size_t sz = pointer_delta(end, q);                                   \
-        if (insert_deadspace(allowed_deadspace, q, sz)) {                    \
-          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);     \
-          q = end;                                                           \
-          end_of_live = end;                                                 \
-          continue;                                                          \
-        }                                                                    \
-      }                                                                      \
-                                                                             \
-      /* otherwise, it really is a free region. */                           \
-                                                                             \
-      /* for the previous LiveRange, record the end of the live objects. */  \
-      if (liveRange) {                                                       \
-        liveRange->set_end(q);                                               \
-      }                                                                      \
-                                                                             \
-      /* record the current LiveRange object.                                \
-       * liveRange->start() is overlaid on the mark word.                    \
-       */                                                                    \
-      liveRange = (LiveRange*)q;                                             \
-      liveRange->set_start(end);                                             \
-      liveRange->set_end(end);                                               \
-                                                                             \
-      /* see if this is the first dead region. */                            \
-      if (q < first_dead) {                                                  \
-        first_dead = q;                                                      \
-      }                                                                      \
-                                                                             \
-      /* move on to the next object */                                       \
-      q = end;                                                               \
-    }                                                                        \
-  }                                                                          \
-                                                                             \
-  assert(q == t, "just checking");                                           \
-  if (liveRange != NULL) {                                                   \
-    liveRange->set_end(q);                                                   \
-  }                                                                          \
-  _end_of_live = end_of_live;                                                \
-  if (end_of_live < first_dead) {                                            \
-    first_dead = end_of_live;                                                \
-  }                                                                          \
-  _first_dead = first_dead;                                                  \
-                                                                             \
-  /* save the compaction_top of the compaction space. */                     \
-  cp->space->set_compaction_top(compact_top);                                \
-}
-
-#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
-  /* adjust all the interior pointers to point at the new locations of objects  \
-   * Used by MarkSweep::mark_sweep_phase3() */                                  \
-                                                                                \
-  HeapWord* q = bottom();                                                       \
-  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
-                                                                                \
-  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
-                                                                                \
-  if (q < t && _first_dead > q &&                                               \
-      !oop(q)->is_gc_marked()) {                                                \
-    /* we have a chunk of the space which hasn't moved and we've                \
-     * reinitialized the mark word during the previous pass, so we can't        \
-     * use is_gc_marked for the traversal. */                                   \
-    HeapWord* end = _first_dead;                                                \
-                                                                                \
-    while (q < end) {                                                           \
-      /* I originally tried to conjoin "block_start(q) == q" to the             \
-       * assertion below, but that doesn't work, because you can't              \
-       * accurately traverse previous objects to get to the current one         \
-       * after their pointers have been                                         \
-       * updated, until the actual compaction is done.  dld, 4/00 */            \
-      assert(block_is_obj(q),                                                   \
-             "should be at block boundaries, and should be looking at objs");   \
-                                                                                \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
-                                                                                \
-      q += size;                                                                \
-    }                                                                           \
-                                                                                \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ This is funky.  Using this to read the previously written          \
-       * LiveRange.  See also use below. */                                     \
-      q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  const intx interval = PrefetchScanIntervalInBytes;                            \
-                                                                                \
-  debug_only(HeapWord* prev_q = NULL);                                          \
-  while (q < t) {                                                               \
-    /* prefetch beyond q */                                                     \
-    Prefetch::write(q, interval);                                               \
-    if (oop(q)->is_gc_marked()) {                                               \
-      /* q is alive */                                                          \
-      /* point all the oops to the new location */                              \
-      size_t size = oop(q)->adjust_pointers();                                  \
-      size = adjust_obj_size(size);                                             \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    } else {                                                                    \
-      /* q is not a live object, so its mark should point at the next           \
-       * live object */                                                         \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  assert(q == t, "just checking");                                              \
-}
-
-#define SCAN_AND_COMPACT(obj_size) {                                            \
-  /* Copy all live objects to their new location                                \
-   * Used by MarkSweep::mark_sweep_phase4() */                                  \
-                                                                                \
-  HeapWord*       q = bottom();                                                 \
-  HeapWord* const t = _end_of_live;                                             \
-  debug_only(HeapWord* prev_q = NULL);                                          \
-                                                                                \
-  if (q < t && _first_dead > q &&                                               \
-      !oop(q)->is_gc_marked()) {                                                \
-    debug_only(                                                                 \
-    /* we have a chunk of the space which hasn't moved and we've reinitialized  \
-     * the mark word during the previous pass, so we can't use is_gc_marked for \
-     * the traversal. */                                                        \
-    HeapWord* const end = _first_dead;                                          \
-                                                                                \
-    while (q < end) {                                                           \
-      size_t size = obj_size(q);                                                \
-      assert(!oop(q)->is_gc_marked(),                                           \
-             "should be unmarked (special dense prefix handling)");             \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    }                                                                           \
-    )  /* debug_only */                                                         \
-                                                                                \
-    if (_first_dead == t) {                                                     \
-      q = t;                                                                    \
-    } else {                                                                    \
-      /* $$$ Funky */                                                           \
-      q = (HeapWord*) oop(_first_dead)->mark()->decode_pointer();               \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  const intx scan_interval = PrefetchScanIntervalInBytes;                       \
-  const intx copy_interval = PrefetchCopyIntervalInBytes;                       \
-  while (q < t) {                                                               \
-    if (!oop(q)->is_gc_marked()) {                                              \
-      /* mark is pointer to next marked oop */                                  \
-      debug_only(prev_q = q);                                                   \
-      q = (HeapWord*) oop(q)->mark()->decode_pointer();                         \
-      assert(q > prev_q, "we should be moving forward through memory");         \
-    } else {                                                                    \
-      /* prefetch beyond q */                                                   \
-      Prefetch::read(q, scan_interval);                                         \
-                                                                                \
-      /* size and destination */                                                \
-      size_t size = obj_size(q);                                                \
-      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();                \
-                                                                                \
-      /* prefetch beyond compaction_top */                                      \
-      Prefetch::write(compaction_top, copy_interval);                           \
-                                                                                \
-      /* copy object and reinit its mark */                                     \
-      assert(q != compaction_top, "everything in this pass should be moving");  \
-      Copy::aligned_conjoint_words(q, compaction_top, size);                    \
-      oop(compaction_top)->init_mark();                                         \
-      assert(oop(compaction_top)->klass() != NULL, "should have a class");      \
-                                                                                \
-      debug_only(prev_q = q);                                                   \
-      q += size;                                                                \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  /* Let's remember if we were empty before we did the compaction. */           \
-  bool was_empty = used_region().is_empty();                                    \
-  /* Reset space after compaction is complete */                                \
-  reset_after_compaction();                                                     \
-  /* We do this clear, below, since it has overloaded meanings for some */      \
-  /* space subtypes.  For example, OffsetTableContigSpace's that were   */      \
-  /* compacted into will have had their offset table thresholds updated */      \
-  /* continuously, but those that weren't need to have their thresholds */      \
-  /* re-initialized.  Also mangles unused area for debugging.           */      \
-  if (used_region().is_empty()) {                                               \
-    if (!was_empty) clear(SpaceDecorator::Mangle);                              \
-  } else {                                                                      \
-    if (ZapUnusedHeapArea) mangle_unused_area();                                \
-  }                                                                             \
-}
-
 inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
   HeapWord* res = ContiguousSpace::allocate(size);
   if (res != NULL) {
@@ -334,4 +71,263 @@
   return _offsets.block_start(p);
 }
 
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
+  // Compute the new addresses for the live objects and store them in the mark word.
+  // Used by universe::mark_sweep_phase2()
+  HeapWord* compact_top; // This is where we are currently compacting to.
+
+  // We're sure to be here before any objects are compacted into this
+  // space, so this is a good time to initialize this:
+  space->set_compaction_top(space->bottom());
+
+  if (cp->space == NULL) {
+    assert(cp->gen != NULL, "need a generation");
+    assert(cp->threshold == NULL, "just checking");
+    assert(cp->gen->first_compaction_space() == space, "just checking");
+    cp->space = cp->gen->first_compaction_space();
+    compact_top = cp->space->bottom();
+    cp->space->set_compaction_top(compact_top);
+    cp->threshold = cp->space->initialize_threshold();
+  } else {
+    compact_top = cp->space->compaction_top();
+  }
+
+  // We allow some amount of garbage towards the bottom of the space, so
+  // we don't start compacting before there is a significant gain to be made.
+  // Occasionally, we want to ensure a full compaction, which is determined
+  // by the MarkSweepAlwaysCompactCount parameter.
+  uint invocations = MarkSweep::total_invocations();
+  bool skip_dead = ((invocations % MarkSweepAlwaysCompactCount) != 0);
+
+  size_t allowed_deadspace = 0;
+  if (skip_dead) {
+    const size_t ratio = space->allowed_dead_ratio();
+    allowed_deadspace = (space->capacity() * ratio / 100) / HeapWordSize;
+  }
+
+  HeapWord* q = space->bottom();
+  HeapWord* t = space->scan_limit();
+
+  HeapWord*  end_of_live = q;           // One byte beyond the last byte of the last
+                                        // live object.
+  HeapWord*  first_dead = space->end(); // The first dead object.
+  LiveRange* liveRange  = NULL;         // The current live range, recorded in the
+                                        // first header of preceding free area.
+  space->_first_dead = first_dead;
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  while (q < t) {
+    assert(!space->scanned_block_is_obj(q) ||
+           oop(q)->mark()->is_marked() || oop(q)->mark()->is_unlocked() ||
+           oop(q)->mark()->has_bias_pattern(),
+           "these are the only valid states during a mark sweep");
+    if (space->scanned_block_is_obj(q) && oop(q)->is_gc_marked()) {
+      // prefetch beyond q
+      Prefetch::write(q, interval);
+      size_t size = space->scanned_block_size(q);
+      compact_top = cp->space->forward(oop(q), size, cp, compact_top);
+      q += size;
+      end_of_live = q;
+    } else {
+      // run over all the contiguous dead objects
+      HeapWord* end = q;
+      do {
+        // prefetch beyond end
+        Prefetch::write(end, interval);
+        end += space->scanned_block_size(end);
+      } while (end < t && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));
+
+      // see if we might want to pretend this object is alive so that
+      // we don't have to compact quite as often.
+      if (allowed_deadspace > 0 && q == compact_top) {
+        size_t sz = pointer_delta(end, q);
+        if (space->insert_deadspace(allowed_deadspace, q, sz)) {
+          compact_top = cp->space->forward(oop(q), sz, cp, compact_top);
+          q = end;
+          end_of_live = end;
+          continue;
+        }
+      }
+
+      // otherwise, it really is a free region.
+
+      // for the previous LiveRange, record the end of the live objects.
+      if (liveRange) {
+        liveRange->set_end(q);
+      }
+
+      // record the current LiveRange object.
+      // liveRange->start() is overlaid on the mark word.
+      liveRange = (LiveRange*)q;
+      liveRange->set_start(end);
+      liveRange->set_end(end);
+
+      // see if this is the first dead region.
+      if (q < first_dead) {
+        first_dead = q;
+      }
+
+      // move on to the next object
+      q = end;
+    }
+  }
+
+  assert(q == t, "just checking");
+  if (liveRange != NULL) {
+    liveRange->set_end(q);
+  }
+  space->_end_of_live = end_of_live;
+  if (end_of_live < first_dead) {
+    first_dead = end_of_live;
+  }
+  space->_first_dead = first_dead;
+
+  // save the compaction_top of the compaction space.
+  cp->space->set_compaction_top(compact_top);
+}
+
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
+  // adjust all the interior pointers to point at the new locations of objects
+  // Used by MarkSweep::mark_sweep_phase3()
+
+  HeapWord* q = space->bottom();
+  HeapWord* t = space->_end_of_live;  // Established by "prepare_for_compaction".
+
+  assert(space->_first_dead <= space->_end_of_live, "Stands to reason, no?");
+
+  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+    // we have a chunk of the space which hasn't moved and we've
+    // reinitialized the mark word during the previous pass, so we can't
+    // use is_gc_marked for the traversal.
+    HeapWord* end = space->_first_dead;
+
+    while (q < end) {
+      // I originally tried to conjoin "block_start(q) == q" to the
+      // assertion below, but that doesn't work, because you can't
+      // accurately traverse previous objects to get to the current one
+      // after their pointers have been
+      // updated, until the actual compaction is done.  dld, 4/00
+      assert(space->block_is_obj(q), "should be at block boundaries, and should be looking at objs");
+
+      // point all the oops to the new location
+      size_t size = oop(q)->adjust_pointers();
+      size = space->adjust_obj_size(size);
+
+      q += size;
+    }
+
+    if (space->_first_dead == t) {
+      q = t;
+    } else {
+      // $$$ This is funky.  Using this to read the previously written
+      // LiveRange.  See also use below.
+      q = (HeapWord*)oop(space->_first_dead)->mark()->decode_pointer();
+    }
+  }
+
+  const intx interval = PrefetchScanIntervalInBytes;
+
+  debug_only(HeapWord* prev_q = NULL);
+  while (q < t) {
+    // prefetch beyond q
+    Prefetch::write(q, interval);
+    if (oop(q)->is_gc_marked()) {
+      // q is alive
+      // point all the oops to the new location
+      size_t size = oop(q)->adjust_pointers();
+      size = space->adjust_obj_size(size);
+      debug_only(prev_q = q);
+      q += size;
+    } else {
+      // q is not a live object, so its mark should point at the next
+      // live object
+      debug_only(prev_q = q);
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      assert(q > prev_q, "we should be moving forward through memory");
+    }
+  }
+
+  assert(q == t, "just checking");
+}
+
+template <class SpaceType>
+inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
+  // Copy all live objects to their new location
+  // Used by MarkSweep::mark_sweep_phase4()
+
+  HeapWord*       q = space->bottom();
+  HeapWord* const t = space->_end_of_live;
+  debug_only(HeapWord* prev_q = NULL);
+
+  if (q < t && space->_first_dead > q && !oop(q)->is_gc_marked()) {
+    #ifdef ASSERT // Debug only
+      // we have a chunk of the space which hasn't moved and we've reinitialized
+      // the mark word during the previous pass, so we can't use is_gc_marked for
+      // the traversal.
+      HeapWord* const end = space->_first_dead;
+
+      while (q < end) {
+        size_t size = space->obj_size(q);
+        assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
+        prev_q = q;
+        q += size;
+      }
+    #endif
+
+    if (space->_first_dead == t) {
+      q = t;
+    } else {
+      // $$$ Funky
+      q = (HeapWord*) oop(space->_first_dead)->mark()->decode_pointer();
+    }
+  }
+
+  const intx scan_interval = PrefetchScanIntervalInBytes;
+  const intx copy_interval = PrefetchCopyIntervalInBytes;
+  while (q < t) {
+    if (!oop(q)->is_gc_marked()) {
+      // mark is pointer to next marked oop
+      debug_only(prev_q = q);
+      q = (HeapWord*) oop(q)->mark()->decode_pointer();
+      assert(q > prev_q, "we should be moving forward through memory");
+    } else {
+      // prefetch beyond q
+      Prefetch::read(q, scan_interval);
+
+      // size and destination
+      size_t size = space->obj_size(q);
+      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();
+
+      // prefetch beyond compaction_top
+      Prefetch::write(compaction_top, copy_interval);
+
+      // copy object and reinit its mark
+      assert(q != compaction_top, "everything in this pass should be moving");
+      Copy::aligned_conjoint_words(q, compaction_top, size);
+      oop(compaction_top)->init_mark();
+      assert(oop(compaction_top)->klass() != NULL, "should have a class");
+
+      debug_only(prev_q = q);
+      q += size;
+    }
+  }
+
+  // Let's remember if we were empty before we did the compaction.
+  bool was_empty = space->used_region().is_empty();
+  // Reset space after compaction is complete
+  space->reset_after_compaction();
+  // We do this clear, below, since it has overloaded meanings for some
+  // space subtypes.  For example, OffsetTableContigSpace's that were
+  // compacted into will have had their offset table thresholds updated
+  // continuously, but those that weren't need to have their thresholds
+  // re-initialized.  Also mangles unused area for debugging.
+  if (space->used_region().is_empty()) {
+    if (!was_empty) space->clear(SpaceDecorator::Mangle);
+  } else {
+    if (ZapUnusedHeapArea) space->mangle_unused_area();
+  }
+}
 #endif // SHARE_VM_MEMORY_SPACE_INLINE_HPP
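
A note on the recurring decode_pointer() hops in all three phases above:
during forwarding, the first word of each dead run (the dead object's mark
word, overlaid by a LiveRange) is overwritten with the address of the next
live object, so the later adjust and compact passes can jump over garbage in
one step instead of re-walking dead blocks. Roughly (an illustration, not
code from the tree):

    // heap:  [live][live][dead..............][live][dead......][live]
    //                     ^mark ------------->^     ^mark ----->^
    // Phases 3 and 4 follow these mark-word pointers, so only live objects
    // (plus one hop per dead run) are ever visited.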
--- a/hotspot/src/share/vm/oops/constMethod.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethod.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -116,7 +116,11 @@
   if (sizes->generic_signature_index() != 0) {
     extra_bytes += sizeof(u2);
   }
-  if (sizes->method_parameters_length() > 0) {
+  // This check must accept zero as well (>= rather than >), because we
+  // might be storing information from a zero-length MethodParameters
+  // attribute.  We have to store these, because in some cases, they
+  // cause the reflection API to throw a MalformedParametersException.
+  if (sizes->method_parameters_length() >= 0) {
     extra_bytes += sizeof(u2);
     extra_bytes += sizes->method_parameters_length() * sizeof(MethodParametersElement);
   }
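
The length field now carries three states rather than two, which is what all
the >= 0 comparisons in this file encode. A condensed sketch of the
convention (not HotSpot source):

    // The convention after this change:
    //   -1  -> no MethodParameters attribute on the method
    //    0  -> attribute present but empty (must still be stored and reported)
    //   n>0 -> n parameter entries
    inline bool has_method_parameters(int method_parameters_length) {
      return method_parameters_length >= 0;
    }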
@@ -237,7 +241,7 @@
     _flags |= _has_linenumber_table;
   if (sizes->generic_signature_index() != 0)
     _flags |= _has_generic_signature;
-  if (sizes->method_parameters_length() > 0)
+  if (sizes->method_parameters_length() >= 0)
     _flags |= _has_method_parameters;
   if (sizes->checked_exceptions_length() > 0)
     _flags |= _has_checked_exceptions;
@@ -272,7 +276,7 @@
   if (sizes->generic_signature_index() != 0)
     *(generic_signature_index_addr()) = sizes->generic_signature_index();
   // New data should probably go here.
-  if (sizes->method_parameters_length() > 0)
+  if (sizes->method_parameters_length() >= 0)
     *(method_parameters_length_addr()) = sizes->method_parameters_length();
   if (sizes->checked_exceptions_length() > 0)
     *(checked_exceptions_length_addr()) = sizes->checked_exceptions_length();
@@ -283,7 +287,7 @@
 }
 
 int ConstMethod::method_parameters_length() const {
-  return has_method_parameters() ? *(method_parameters_length_addr()) : 0;
+  return has_method_parameters() ? *(method_parameters_length_addr()) : -1;
 }
 
 MethodParametersElement* ConstMethod::method_parameters_start() const {
--- a/hotspot/src/share/vm/oops/constMethod.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/oops/constMethod.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -372,6 +372,11 @@
   ExceptionTableElement* exception_table_start() const;
 
   // method parameters table
+
+  // Returns -1 if no parameters are present, a non-negative value
+  // otherwise.  Note: sometimes there are zero-length MethodParameters
+  // attributes that must still be reported to the reflection API.
   int method_parameters_length() const;
   MethodParametersElement* method_parameters_start() const;
 
--- a/hotspot/src/share/vm/oops/method.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -588,6 +588,15 @@
   return true;
 }
 
+bool Method::is_constant_getter() const {
+  int last_index = code_size() - 1;
+  // Check that the method consists of exactly one constant-push
+  // bytecode (1-3 bytes long) followed by a return.
+  return (2 <= code_size() && code_size() <= 4 &&
+          Bytecodes::is_const(java_code_at(0)) &&
+          Bytecodes::length_for(java_code_at(0)) == last_index &&
+          Bytecodes::is_return(java_code_at(last_index)));
+}
 
 bool Method::is_initializer() const {
   return name() == vmSymbols::object_initializer_name() || is_static_initializer();
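
Concretely, is_constant_getter() matches bodies consisting of one
constant-push bytecode followed by a return, for example (illustrative
bytecode listings):

    // iconst_1; ireturn      -> code_size 2, push length 1 == last_index 1
    // bipush 42; ireturn     -> code_size 3, push length 2 == last_index 2
    // sipush 1000; ireturn   -> code_size 4, push length 3 == last_index 3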
--- a/hotspot/src/share/vm/oops/method.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/oops/method.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -595,6 +595,9 @@
   // returns true if the method is an accessor function (setter/getter).
   bool is_accessor() const;
 
+  // returns true if the method does nothing but return a constant of primitive type
+  bool is_constant_getter() const;
+
   // returns true if the method is an initializer (<init> or <clinit>).
   bool is_initializer() const;
 
--- a/hotspot/src/share/vm/oops/methodData.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodData.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1134,7 +1134,7 @@
   _tenure_traps = 0;
   _num_loops = 0;
   _num_blocks = 0;
-  _would_profile = true;
+  _would_profile = unknown;
 
 #if INCLUDE_RTM_OPT
   _rtm_state = NoRTM; // No RTM lock eliding by default
--- a/hotspot/src/share/vm/oops/methodData.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/oops/methodData.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -2096,7 +2096,8 @@
   short             _num_loops;
   short             _num_blocks;
   // Does this method contain anything worth profiling?
-  bool              _would_profile;
+  enum WouldProfile {unknown, no_profile, profile};
+  WouldProfile      _would_profile;
 
   // Size of _data array in bytes.  (Excludes header and extra_data fields.)
   int _data_size;
@@ -2270,8 +2271,8 @@
   }
 #endif
 
-  void set_would_profile(bool p)              { _would_profile = p;    }
-  bool would_profile() const                  { return _would_profile; }
+  void set_would_profile(bool p)              { _would_profile = p ? profile : no_profile; }
+  bool would_profile() const                  { return _would_profile != no_profile; }
 
   int num_loops() const                       { return _num_loops;  }
   void set_num_loops(int n)                   { _num_loops = n;     }
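
The bool-to-enum widening keeps the external contract intact while adding a
distinguishable "never set" state: would_profile() reports true for both
unknown and profile, so a freshly reset MethodData behaves exactly as the old
default of true did. A condensed view of the mapping (sketch, derived from
the hunk above):

    // State        would_profile()   set via
    // unknown      true              reset default (replaces old 'true')
    // profile      true              set_would_profile(true)
    // no_profile   false             set_would_profile(false)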
--- a/hotspot/src/share/vm/opto/doCall.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/doCall.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -794,7 +794,7 @@
   Node* ex_klass_node = NULL;
   if (has_ex_handler() && !ex_type->klass_is_exact()) {
     Node* p = basic_plus_adr( ex_node, ex_node, oopDesc::klass_offset_in_bytes());
-    ex_klass_node = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+    ex_klass_node = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
 
     // Compute the exception klass a little more cleverly.
     // Obvious solution is to simple do a LoadKlass from the 'ex_node'.
@@ -812,7 +812,7 @@
           continue;
         }
         Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
-        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
+        Node* k = _gvn.transform( LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT));
         ex_klass_node->init_req( i, k );
       }
       _gvn.set_type(ex_klass_node, TypeKlassPtr::OBJECT);
--- a/hotspot/src/share/vm/opto/graphKit.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1154,7 +1154,7 @@
   Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
   if (akls != NULL)  return akls;
   Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-  return _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS) );
+  return _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), k_adr, TypeInstPtr::KLASS));
 }
 
 //-------------------------load_array_length-----------------------------------
@@ -2615,7 +2615,7 @@
   // types load from the super-class display table which is immutable.
   m = mem->memory_at(C->get_alias_index(gvn->type(p2)->is_ptr()));
   Node *kmem = might_be_cache ? m : C->immutable_memory();
-  Node *nkls = gvn->transform(LoadKlassNode::make(*gvn, kmem, p2, gvn->type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
+  Node *nkls = gvn->transform(LoadKlassNode::make(*gvn, NULL, kmem, p2, gvn->type(p2)->is_ptr(), TypeKlassPtr::OBJECT_OR_NULL));
 
   // Compile speed common case: ARE a subtype and we canNOT fail
   if( superklass == nkls )
--- a/hotspot/src/share/vm/opto/library_call.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -3345,7 +3345,7 @@
   if (region == NULL)  never_see_null = true;
   Node* p = basic_plus_adr(mirror, offset);
   const TypeKlassPtr*  kls_type = TypeKlassPtr::OBJECT_OR_NULL;
-  Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
+  Node* kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type));
   Node* null_ctl = top();
   kls = null_check_oop(kls, &null_ctl, never_see_null);
   if (region != NULL) {
@@ -3517,7 +3517,7 @@
       phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror())));
     // If we fall through, it's a plain class.  Get its _super.
     p = basic_plus_adr(kls, in_bytes(Klass::super_offset()));
-    kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
+    kls = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL));
     null_ctl = top();
     kls = null_check_oop(kls, &null_ctl);
     if (null_ctl != top()) {
@@ -3671,7 +3671,7 @@
     args[which_arg] = arg;
 
     Node* p = basic_plus_adr(arg, class_klass_offset);
-    Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
+    Node* kls = LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, adr_type, kls_type);
     klasses[which_arg] = _gvn.transform(kls);
   }
 
--- a/hotspot/src/share/vm/opto/macro.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/macro.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1197,8 +1197,7 @@
   }
 
   if (C->env()->dtrace_alloc_probes() ||
-      !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() ||
-                   (UseConcMarkSweepGC && CMSIncrementalMode))) {
+      !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc())) {
     // Force slow-path allocation
     always_slow = true;
     initial_slow_test = NULL;
@@ -2202,7 +2201,7 @@
     Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn);
     if (klass_node == NULL) {
       Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
-      klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
+      klass_node = transform_later(LoadKlassNode::make(_igvn, NULL, mem, k_adr, _igvn.type(k_adr)->is_ptr()));
 #ifdef _LP64
       if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
         assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
--- a/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/macroArrayCopy.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -529,7 +529,7 @@
       // (At this point we can assume disjoint_bases, since types differ.)
       int ek_offset = in_bytes(ObjArrayKlass::element_klass_offset());
       Node* p1 = basic_plus_adr(dest_klass, ek_offset);
-      Node* n1 = LoadKlassNode::make(_igvn, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
+      Node* n1 = LoadKlassNode::make(_igvn, NULL, C->immutable_memory(), p1, TypeRawPtr::BOTTOM);
       Node* dest_elem_klass = transform_later(n1);
       Node* cv = generate_checkcast_arraycopy(&local_ctrl, &local_mem,
                                               adr_type,
--- a/hotspot/src/share/vm/opto/memnode.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -861,6 +861,10 @@
 
 
 //=============================================================================
+// Should LoadNode::Ideal() attempt to remove control edges?
+bool LoadNode::can_remove_control() const {
+  return true;
+}
 uint LoadNode::size_of() const { return sizeof(*this); }
 uint LoadNode::cmp( const Node &n ) const
 { return !Type::cmp( _type, ((LoadNode&)n)._type ); }
@@ -1471,7 +1475,7 @@
 }
 
 //------------------------------Ideal------------------------------------------
-// If the load is from Field memory and the pointer is non-null, we can
+// If the load is from Field memory and the pointer is non-null, it might be possible to
 // zero out the control input.
 // If the offset is constant and the base is an object allocation,
 // try to hook me up to the exact initializing store.
@@ -1498,6 +1502,7 @@
       && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) {
     // Check for useless control edge in some common special cases
     if (in(MemNode::Control) != NULL
+        && can_remove_control()
         && phase->type(base)->higher_equal(TypePtr::NOTNULL)
         && all_controls_dominate(base, phase->C->start())) {
       // A method-invariant, non-null address (constant or 'this' argument).
@@ -2019,8 +2024,7 @@
 //=============================================================================
 //----------------------------LoadKlassNode::make------------------------------
 // Polymorphic factory method:
-Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) {
-  Node *ctl = NULL;
+Node* LoadKlassNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, const TypeKlassPtr* tk) {
   // sanity check the alias category against the created node type
   const TypePtr *adr_type = adr->bottom_type()->isa_ptr();
   assert(adr_type != NULL, "expecting TypeKlassPtr");
@@ -2040,6 +2044,12 @@
   return klass_value_common(phase);
 }
 
+// In most cases, LoadKlassNode does not have the control input set. If the control
+// input is set, it must not be removed (by LoadNode::Ideal()).
+bool LoadKlassNode::can_remove_control() const {
+  return false;
+}
+
 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
   // Either input is TOP ==> the result is TOP
   const Type *t1 = phase->type( in(MemNode::Memory) );
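
The two hunks above split one decision across a virtual hook: LoadNode::Ideal() keeps its control-edge-removal optimization but now asks the node itself for permission, and LoadKlassNode vetoes it. A minimal standalone sketch of the pattern follows; the names and the trivial dominance checks are illustrative stand-ins, not HotSpot code.

    #include <cstdio>

    struct Node {
      virtual ~Node() {}
      // Base behavior: an ordinary load may lose a redundant control edge.
      virtual bool can_remove_control() const { return true; }

      // Stand-ins for the real control-input and dominance tests.
      bool has_control() const          { return true; }
      bool control_is_redundant() const { return true; }
      void drop_control()               { std::puts("control removed"); }

      // Mirrors the guard added to LoadNode::Ideal(): the edge is dropped
      // only when the subclass permits it.
      void ideal() {
        if (has_control() && can_remove_control() && control_is_redundant()) {
          drop_control();
        }
      }
    };

    struct KlassLoad : Node {
      // Mirrors LoadKlassNode: an explicitly set control input must survive.
      virtual bool can_remove_control() const { return false; }
    };

    int main() {
      Node plain;      plain.ideal();  // prints "control removed"
      KlassLoad klass; klass.ideal();  // keeps its control edge
      return 0;
    }
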
--- a/hotspot/src/share/vm/opto/memnode.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/memnode.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -148,6 +148,8 @@
 protected:
   virtual uint cmp(const Node &n) const;
   virtual uint size_of() const; // Size is bigger
+  // Should LoadNode::Ideal() attempt to remove control edges?
+  virtual bool can_remove_control() const;
   const Type* const _type;      // What kind of value is loaded?
 public:
 
@@ -171,8 +173,10 @@
   // we are equivalent to.  We look for Load of a Store.
   virtual Node *Identity( PhaseTransform *phase );
 
-  // If the load is from Field memory and the pointer is non-null, we can
+  // If the load is from Field memory and the pointer is non-null, it might be possible to
   // zero out the control input.
+  // If the offset is constant and the base is an object allocation,
+  // try to hook me up to the exact initializing store.
   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
 
   // Split instance field load through Phi.
@@ -431,6 +435,10 @@
 //------------------------------LoadKlassNode----------------------------------
 // Load a Klass from an object
 class LoadKlassNode : public LoadPNode {
+protected:
+  // In most cases, LoadKlassNode does not have the control input set. If the control
+  // input is set, it must not be removed (by LoadNode::Ideal()).
+  virtual bool can_remove_control() const;
 public:
   LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
     : LoadPNode(c, mem, adr, at, tk, mo) {}
@@ -440,8 +448,8 @@
   virtual bool depends_only_on_test() const { return true; }
 
   // Polymorphic factory method:
-  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
-                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
+  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
+                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
 };
 
 //------------------------------LoadNKlassNode---------------------------------
--- a/hotspot/src/share/vm/opto/parse1.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/parse1.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1987,7 +1987,7 @@
   // finalization.  In general this will fold up since the concrete
   // class is often visible so the access flags are constant.
   Node* klass_addr = basic_plus_adr( receiver, receiver, oopDesc::klass_offset_in_bytes() );
-  Node* klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), klass_addr, TypeInstPtr::KLASS) );
+  Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), klass_addr, TypeInstPtr::KLASS));
 
   Node* access_flags_addr = basic_plus_adr(klass, klass, in_bytes(Klass::access_flags_offset()));
   Node* access_flags = make_load(NULL, access_flags_addr, TypeInt::INT, T_INT, MemNode::unordered);
--- a/hotspot/src/share/vm/opto/parseHelper.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/opto/parseHelper.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -156,22 +156,43 @@
   int klass_offset = oopDesc::klass_offset_in_bytes();
   Node* p = basic_plus_adr( ary, ary, klass_offset );
   // p's type is array-of-OOPS plus klass_offset
-  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
+  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
   // Get the array klass
   const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();
 
-  // array_klass's type is generally INexact array-of-oop.  Heroically
-  // cast the array klass to EXACT array and uncommon-trap if the cast
-  // fails.
+  // The type of array_klass is usually INexact array-of-oop.  Heroically
+  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
+  // Make a constant out of the inexact array klass, but use it only if the cast
+  // succeeds.
   bool always_see_exact_class = false;
   if (MonomorphicArrayCheck
-      && !too_many_traps(Deoptimization::Reason_array_check)) {
+      && !too_many_traps(Deoptimization::Reason_array_check)
+      && !tak->klass_is_exact()
+      && tak != TypeKlassPtr::OBJECT) {
+      // Regarding the fourth condition in the if-statement above:
+      //
+      // If the compiler has determined that the type of array 'ary' (represented
+      // by 'array_klass') is java/lang/Object, the compiler must not assume that
+      // the array 'ary' is monomorphic.
+      //
+      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
+      // because it is not possible to perform an arraystore into an object that is not
+      // a "proper" array.
+      //
+      // Therefore, obtain the type of 'ary' at runtime and check whether the store
+      // can still be performed successfully.
+      //
+      // The implementation reasons for the condition are the following:
+      //
+      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
+      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
+      // 'array_klass' to be an ObjArrayKlass, which can result in invalid memory accesses.
+      //
+      // See issue JDK-8057622 for details.
+
     always_see_exact_class = true;
     // (If no MDO at all, hope for the best, until a trap actually occurs.)
-  }
 
-  // Is the array klass is exactly its defined type?
-  if (always_see_exact_class && !tak->klass_is_exact()) {
     // Make a constant out of the inexact array klass
     const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
     Node* con = makecon(extak);
@@ -202,11 +223,15 @@
   // Extract the array element class
   int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
   Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
-  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );
+  // We are allowed to use the constant type only if the cast succeeded. If always_see_exact_class
+  // is true, we must set a control edge from the IfTrue node created by the uncommon_trap
+  // above to the LoadKlassNode.
+  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
+                                                       immutable_memory(), p2, tak));
 
   // Check (the hard way) and throw if not a subklass.
   // Result is ignored, we just need the CFG effects.
-  gen_checkcast( obj, a_e_klass );
+  gen_checkcast(obj, a_e_klass);
 }
 
 
--- a/hotspot/src/share/vm/prims/jvm.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1657,7 +1657,17 @@
   Handle reflected_method (THREAD, JNIHandles::resolve_non_null(method));
   const int num_params = mh->method_parameters_length();
 
-  if (0 != num_params) {
+  if (num_params < 0) {
+    // A -1 return value from method_parameters_length means there is no
+    // parameter data.  Return null to indicate this to the reflection
+    // API.
+    assert(num_params == -1, "num_params should be -1 if it is less than zero");
+    return (jobjectArray)NULL;
+  } else {
+    // Otherwise, we return something up to reflection, even if it is
+    // a zero-length array.  Why?  Because in some cases this can
+    // trigger a MalformedParametersException.
+
     // make sure all the symbols are properly formatted
     for (int i = 0; i < num_params; i++) {
       MethodParametersElement* params = mh->method_parameters_start();
@@ -1685,8 +1695,6 @@
       result->obj_at_put(i, param);
     }
     return (jobjectArray)JNIHandles::make_local(env, result());
-  } else {
-    return (jobjectArray)NULL;
   }
 }
 JVM_END
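
The rewritten branch distinguishes three outcomes of method_parameters_length(): -1 (no MethodParameters attribute, return null), 0 (return an empty array), and n > 0 (return n Parameter objects); returning an array even for zero entries keeps the format checks reachable, so malformed attributes can still surface as a MalformedParametersException. A hedged sketch of the tri-state shape, with hypothetical plain-data stand-ins for the VM types:

    #include <cassert>
    #include <vector>

    // Hypothetical stand-in: -1 means the attribute is absent.
    int method_parameters_length(bool has_attribute, int count) {
      return has_attribute ? count : -1;
    }

    // nullptr when the attribute is absent; a (possibly empty) vector otherwise.
    const std::vector<int>* get_method_parameters(bool has_attribute, int count) {
      static std::vector<int> result;
      const int num_params = method_parameters_length(has_attribute, count);
      if (num_params < 0) {
        assert(num_params == -1 && "negative length must be exactly -1");
        return nullptr;               // no parameter data at all
      }
      result.assign(num_params, 0);   // an empty array is a legitimate answer
      return &result;
    }

    int main() {
      assert(get_method_parameters(false, 0) == nullptr); // attribute absent
      assert(get_method_parameters(true, 0)->empty());    // present but empty
      assert(get_method_parameters(true, 2)->size() == 2);
      return 0;
    }
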
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -24,6 +24,8 @@
 
 #include "precompiled.hpp"
 
+#include <new>
+
 #include "code/codeCache.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/universe.hpp"
@@ -37,9 +39,11 @@
 
 #include "runtime/thread.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/deoptimization.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
 #include "runtime/vm_version.hpp"
+#include "runtime/sweeper.hpp"
 
 #include "utilities/array.hpp"
 #include "utilities/debug.hpp"
@@ -67,6 +71,7 @@
 #define SIZE_T_MAX_VALUE ((size_t) -1)
 
 bool WhiteBox::_used = false;
+volatile bool WhiteBox::compilation_locked = false;
 
 WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
   return (jlong)(void*)JNIHandles::resolve(obj);
@@ -302,13 +307,12 @@
 WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
   jlong addr = 0;
 
-    addr = (jlong)(uintptr_t)os::reserve_memory(size);
-    MemTracker::record_virtual_memory_type((address)addr, mtTest);
+  addr = (jlong)(uintptr_t)os::reserve_memory(size);
+  MemTracker::record_virtual_memory_type((address)addr, mtTest);
 
   return addr;
 WB_END
 
-
 WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
   os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
   MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
@@ -728,6 +732,29 @@
 WB_END
 
 
+WB_ENTRY(void, WB_LockCompilation(JNIEnv* env, jobject o, jlong timeout))
+  WhiteBox::compilation_locked = true;
+WB_END
+
+WB_ENTRY(void, WB_UnlockCompilation(JNIEnv* env, jobject o))
+  MonitorLockerEx mo(Compilation_lock, Mutex::_no_safepoint_check_flag);
+  WhiteBox::compilation_locked = false;
+  mo.notify_all();
+WB_END
+
+void WhiteBox::force_sweep() {
+  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    NMethodSweeper::_should_sweep = true;
+  }
+  NMethodSweeper::possibly_sweep();
+}
+
+WB_ENTRY(void, WB_ForceNMethodSweep(JNIEnv* env, jobject o))
+  WhiteBox::force_sweep();
+WB_END
+
 WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString))
   ResourceMark rm(THREAD);
   int len;
@@ -774,6 +801,46 @@
   return features_string;
 WB_END
 
+int WhiteBox::get_blob_type(const CodeBlob* code) {
+  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
+  return CodeCache::get_code_heap(code)->code_blob_type();
+}
+
+CodeHeap* WhiteBox::get_code_heap(int blob_type) {
+  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
+  return CodeCache::get_code_heap(blob_type);
+}
+
+struct CodeBlobStub {
+  CodeBlobStub(const CodeBlob* blob) :
+      name(os::strdup(blob->name())),
+      size(blob->size()),
+      blob_type(WhiteBox::get_blob_type(blob)) { }
+  ~CodeBlobStub() { os::free((void*) name); }
+  const char* const name;
+  const int         size;
+  const int         blob_type;
+};
+
+static jobjectArray codeBlob2objectArray(JavaThread* thread, JNIEnv* env, CodeBlobStub* cb) {
+  jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  jobjectArray result = env->NewObjectArray(3, clazz, NULL);
+
+  jstring name = env->NewStringUTF(cb->name);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  env->SetObjectArrayElement(result, 0, name);
+
+  jobject obj = integerBox(thread, env, cb->size);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  env->SetObjectArrayElement(result, 1, obj);
+
+  obj = integerBox(thread, env, cb->blob_type);
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  env->SetObjectArrayElement(result, 2, obj);
+
+  return result;
+}
 
 WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   ResourceMark rm(THREAD);
@@ -790,24 +857,90 @@
   ThreadToNativeFromVM ttn(thread);
   jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
   CHECK_JNI_EXCEPTION_(env, NULL);
-  result = env->NewObjectArray(3, clazz, NULL);
+  result = env->NewObjectArray(4, clazz, NULL);
   if (result == NULL) {
     return result;
   }
 
+  CodeBlobStub stub(code);
+  jobjectArray codeBlob = codeBlob2objectArray(thread, env, &stub);
+  env->SetObjectArrayElement(result, 0, codeBlob);
+
   jobject level = integerBox(thread, env, code->comp_level());
   CHECK_JNI_EXCEPTION_(env, NULL);
-  env->SetObjectArrayElement(result, 0, level);
+  env->SetObjectArrayElement(result, 1, level);
 
   jbyteArray insts = env->NewByteArray(insts_size);
   CHECK_JNI_EXCEPTION_(env, NULL);
   env->SetByteArrayRegion(insts, 0, insts_size, (jbyte*) code->insts_begin());
-  env->SetObjectArrayElement(result, 1, insts);
+  env->SetObjectArrayElement(result, 2, insts);
 
   jobject id = integerBox(thread, env, code->compile_id());
   CHECK_JNI_EXCEPTION_(env, NULL);
-  env->SetObjectArrayElement(result, 2, id);
+  env->SetObjectArrayElement(result, 3, id);
+
+  return result;
+WB_END
+
+CodeBlob* WhiteBox::allocate_code_blob(int size, int blob_type) {
+  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
+  BufferBlob* blob;
+  int full_size = CodeBlob::align_code_offset(sizeof(BufferBlob));
+  if (full_size < size) {
+    full_size += round_to(size - full_size, oopSize);
+  }
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    blob = (BufferBlob*) CodeCache::allocate(full_size, blob_type);
+  }
+  // Track memory usage statistic after releasing CodeCache_lock
+  MemoryService::track_code_cache_memory_usage();
+  ::new (blob) BufferBlob("WB::DummyBlob", full_size);
+  return blob;
+}
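
allocate_code_blob() reserves the aligned BufferBlob header first and only then rounds the remaining payload up to oopSize. A worked sketch of that arithmetic; the header size of 64 and oopSize of 8 are assumed values for illustration, not the actual platform constants:

    #include <cassert>

    // Assumed stand-ins: the real values come from
    // CodeBlob::align_code_offset(sizeof(BufferBlob)) and the platform oopSize.
    const int kAlignedHeader = 64;  // hypothetical aligned header size
    const int kOopSize       = 8;   // hypothetical oopSize

    int round_to(int x, int unit) { return ((x + unit - 1) / unit) * unit; }

    int full_blob_size(int requested) {
      int full_size = kAlignedHeader;
      if (full_size < requested) {
        full_size += round_to(requested - full_size, kOopSize);
      }
      return full_size;
    }

    int main() {
      assert(full_blob_size(1)   == 64);   // tiny request: the header dominates
      assert(full_blob_size(100) == 104);  // 64 + round_to(36, 8) = 64 + 40
      assert(full_blob_size(64)  == 64);   // exactly the header size
      return 0;
    }
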
+
+WB_ENTRY(jlong, WB_AllocateCodeBlob(JNIEnv* env, jobject o, jint size, jint blob_type))
+    return (jlong) WhiteBox::allocate_code_blob(size, blob_type);
+WB_END
+
+WB_ENTRY(void, WB_FreeCodeBlob(JNIEnv* env, jobject o, jlong addr))
+    BufferBlob::free((BufferBlob*) addr);
+WB_END
 
+WB_ENTRY(jobjectArray, WB_GetCodeHeapEntries(JNIEnv* env, jobject o, jint blob_type))
+  ResourceMark rm;
+  GrowableArray<CodeBlobStub*> blobs;
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    CodeHeap* heap = WhiteBox::get_code_heap(blob_type);
+    if (heap == NULL) {
+      return NULL;
+    }
+    for (CodeBlob* cb = (CodeBlob*) heap->first();
+         cb != NULL; cb = (CodeBlob*) heap->next(cb)) {
+      CodeBlobStub* stub = NEW_RESOURCE_OBJ(CodeBlobStub);
+      new (stub) CodeBlobStub(cb);
+      blobs.append(stub);
+    }
+  }
+  if (blobs.length() == 0) {
+    return NULL;
+  }
+  ThreadToNativeFromVM ttn(thread);
+  jobjectArray result = NULL;
+  jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
+  CHECK_JNI_EXCEPTION_(env, NULL);
+  result = env->NewObjectArray(blobs.length(), clazz, NULL);
+  if (result == NULL) {
+    return result;
+  }
+  int i = 0;
+  for (GrowableArrayIterator<CodeBlobStub*> it = blobs.begin();
+       it != blobs.end(); ++it) {
+    jobjectArray obj = codeBlob2objectArray(thread, env, *it);
+    env->SetObjectArrayElement(result, i, obj);
+    ++i;
+  }
   return result;
 WB_END
 
@@ -1018,6 +1151,8 @@
       CC"(Ljava/lang/reflect/Executable;II)Z",        (void*)&WB_EnqueueMethodForCompilation},
   {CC"clearMethodState",
       CC"(Ljava/lang/reflect/Executable;)V",          (void*)&WB_ClearMethodState},
+  {CC"lockCompilation",    CC"()V",                   (void*)&WB_LockCompilation},
+  {CC"unlockCompilation",  CC"()V",                   (void*)&WB_UnlockCompilation},
   {CC"isConstantVMFlag",   CC"(Ljava/lang/String;)Z", (void*)&WB_IsConstantVMFlag},
   {CC"isLockedVMFlag",     CC"(Ljava/lang/String;)Z", (void*)&WB_IsLockedVMFlag},
   {CC"setBooleanVMFlag",   CC"(Ljava/lang/String;Z)V",(void*)&WB_SetBooleanVMFlag},
@@ -1055,6 +1190,10 @@
   {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   {CC"getNMethod",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
                                                       (void*)&WB_GetNMethod         },
+  {CC"forceNMethodSweep",  CC"()V",                   (void*)&WB_ForceNMethodSweep  },
+  {CC"allocateCodeBlob",   CC"(II)J",                 (void*)&WB_AllocateCodeBlob   },
+  {CC"freeCodeBlob",       CC"(J)V",                  (void*)&WB_FreeCodeBlob       },
+  {CC"getCodeHeapEntries", CC"(I)[Ljava/lang/Object;",(void*)&WB_GetCodeHeapEntries },
   {CC"getThreadStackSize", CC"()J",                   (void*)&WB_GetThreadStackSize },
   {CC"getThreadRemainingStackSize", CC"()J",          (void*)&WB_GetThreadRemainingStackSize },
 };
--- a/hotspot/src/share/vm/prims/whitebox.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/prims/whitebox.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -54,17 +54,24 @@
     }                                                                  \
   } while (0)
 
+class CodeBlob;
+class CodeHeap;
+
 class WhiteBox : public AllStatic {
  private:
   static bool _used;
  public:
+  static volatile bool compilation_locked;
   static bool used()     { return _used; }
   static void set_used() { _used = true; }
   static int offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol);
   static const char* lookup_jstring(const char* field_name, oop object);
   static bool lookup_bool(const char* field_name, oop object);
-
+  static void force_sweep();
+  static int get_blob_type(const CodeBlob* code);
+  static CodeHeap* get_code_heap(int blob_type);
+  static CodeBlob* allocate_code_blob(int size, int blob_type);
   static int array_bytes_to_length(size_t bytes);
   static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
     JNINativeMethod* method_array, int method_count);
--- a/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/advancedThresholdPolicy.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -317,8 +317,8 @@
  * c. 0 -> (3->2) -> 4.
  *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
  *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
- *    of the method to 2, because it'll allow it to run much faster without full profiling while c2
- *    is compiling.
+ *    of the method to 2 while the request is still in the queue, because that lets the method
+ *    run much faster without full profiling while c2 is compiling.
  *
  * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
  *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1777,7 +1777,7 @@
 #ifdef ASSERT
 static bool verify_serial_gc_flags() {
   return (UseSerialGC &&
-        !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
+        !(UseParNewGC || (UseConcMarkSweepGC) || UseG1GC ||
           UseParallelGC || UseParallelOldGC));
 }
 #endif // ASSERT
@@ -2191,10 +2191,6 @@
     warning("Using the ParNew young collector with the Serial old collector is deprecated "
         "and will likely be removed in a future release");
   }
-
-  if (CMSIncrementalMode) {
-    warning("Using incremental CMS is deprecated and will likely be removed in a future release");
-  }
 }
 
 void Arguments::check_deprecated_gc_flags() {
@@ -2316,31 +2312,8 @@
   status = status && ArgumentsExt::check_gc_consistency_user();
   status = status && check_stack_pages();
 
-  if (CMSIncrementalMode) {
-    if (!UseConcMarkSweepGC) {
-      jio_fprintf(defaultStream::error_stream(),
-                  "error:  invalid argument combination.\n"
-                  "The CMS collector (-XX:+UseConcMarkSweepGC) must be "
-                  "selected in order\nto use CMSIncrementalMode.\n");
-      status = false;
-    } else {
-      status = status && verify_percentage(CMSIncrementalDutyCycle,
-                                  "CMSIncrementalDutyCycle");
-      status = status && verify_percentage(CMSIncrementalDutyCycleMin,
-                                  "CMSIncrementalDutyCycleMin");
-      status = status && verify_percentage(CMSIncrementalSafetyFactor,
-                                  "CMSIncrementalSafetyFactor");
-      status = status && verify_percentage(CMSIncrementalOffset,
-                                  "CMSIncrementalOffset");
-      status = status && verify_percentage(CMSExpAvgFactor,
-                                  "CMSExpAvgFactor");
-      // If it was not set on the command line, set
-      // CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
-      if (CMSInitiatingOccupancyFraction < 0) {
-        FLAG_SET_DEFAULT(CMSInitiatingOccupancyFraction, 1);
-      }
-    }
-  }
+  status = status && verify_percentage(CMSIncrementalSafetyFactor,
+                                       "CMSIncrementalSafetyFactor");
 
   // CMS space iteration, which FLSVerifyAllHeapreferences entails,
   // insists that we hold the requisite locks so that the iteration is
@@ -2874,14 +2847,6 @@
     // -Xnoclassgc
     } else if (match_option(option, "-Xnoclassgc", &tail)) {
       FLAG_SET_CMDLINE(bool, ClassUnloading, false);
-    // -Xincgc: i-CMS
-    } else if (match_option(option, "-Xincgc", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
-      FLAG_SET_CMDLINE(bool, CMSIncrementalMode, true);
-    // -Xnoincgc: no i-CMS
-    } else if (match_option(option, "-Xnoincgc", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false);
-      FLAG_SET_CMDLINE(bool, CMSIncrementalMode, false);
     // -Xconcgc
     } else if (match_option(option, "-Xconcgc", &tail)) {
       FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
@@ -3711,7 +3676,6 @@
 #if !INCLUDE_ALL_GCS
 static void force_serial_gc() {
   FLAG_SET_DEFAULT(UseSerialGC, true);
-  FLAG_SET_DEFAULT(CMSIncrementalMode, false);  // special CMS suboption
   UNSUPPORTED_GC_OPTION(UseG1GC);
   UNSUPPORTED_GC_OPTION(UseParallelGC);
   UNSUPPORTED_GC_OPTION(UseParallelOldGC);
--- a/hotspot/src/share/vm/runtime/globals.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -1638,30 +1638,10 @@
           "The maximum size of young gen chosen by default per GC worker "  \
           "thread available")                                               \
                                                                             \
-  product(bool, CMSIncrementalMode, false,                                  \
-          "Whether CMS GC should operate in \"incremental\" mode")          \
-                                                                            \
-  product(uintx, CMSIncrementalDutyCycle, 10,                               \
-          "Percentage (0-100) of CMS incremental mode duty cycle. If "      \
-          "CMSIncrementalPacing is enabled, then this is just the initial " \
-          "value.")                                                         \
-                                                                            \
-  product(bool, CMSIncrementalPacing, true,                                 \
-          "Whether the CMS incremental mode duty cycle should be "          \
-          "automatically adjusted")                                         \
-                                                                            \
-  product(uintx, CMSIncrementalDutyCycleMin, 0,                             \
-          "Minimum percentage (0-100) of the CMS incremental duty cycle "   \
-          "used when CMSIncrementalPacing is enabled")                      \
-                                                                            \
   product(uintx, CMSIncrementalSafetyFactor, 10,                            \
           "Percentage (0-100) used to add conservatism when computing the " \
           "duty cycle")                                                     \
                                                                             \
-  product(uintx, CMSIncrementalOffset, 0,                                   \
-          "Percentage (0-100) by which the CMS incremental mode duty cycle "\
-          "is shifted to the right within the period between young GCs")    \
-                                                                            \
   product(uintx, CMSExpAvgFactor, 50,                                       \
           "Percentage (0-100) used to weight the current sample when "      \
           "computing exponential averages for CMS statistics")              \
@@ -1720,15 +1700,6 @@
           "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
           "duration exceeds this threshold in milliseconds")                \
                                                                             \
-  develop(bool, CMSTraceIncrementalMode, false,                             \
-          "Trace CMS incremental mode")                                     \
-                                                                            \
-  develop(bool, CMSTraceIncrementalPacing, false,                           \
-          "Trace CMS incremental mode pacing computation")                  \
-                                                                            \
-  develop(bool, CMSTraceThreadState, false,                                 \
-          "Trace the CMS thread state (enable the trace_state() method)")   \
-                                                                            \
   product(bool, CMSClassUnloadingEnabled, true,                             \
           "Whether class unloading enabled when using CMS GC")              \
                                                                             \
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -72,7 +72,6 @@
 Monitor* CGC_lock                     = NULL;
 Monitor* STS_lock                     = NULL;
 Monitor* SLT_lock                     = NULL;
-Monitor* iCMS_lock                    = NULL;
 Monitor* FullGCCount_lock             = NULL;
 Monitor* CMark_lock                   = NULL;
 Mutex*   CMRegionStack_lock           = NULL;
@@ -88,6 +87,7 @@
 Mutex*   Compile_lock                 = NULL;
 Monitor* MethodCompileQueue_lock      = NULL;
 Monitor* CompileThread_lock           = NULL;
+Monitor* Compilation_lock             = NULL;
 Mutex*   CompileTaskAlloc_lock        = NULL;
 Mutex*   CompileStatistics_lock       = NULL;
 Mutex*   MultiArray_lock              = NULL;
@@ -175,9 +175,6 @@
 
   def(CGC_lock                   , Monitor, special,     true ); // coordinate between fore- and background GC
   def(STS_lock                   , Monitor, leaf,        true );
-  if (UseConcMarkSweepGC) {
-    def(iCMS_lock                  , Monitor, special,     true ); // CMS incremental mode start/stop notification
-  }
   if (UseConcMarkSweepGC || UseG1GC) {
     def(FullGCCount_lock           , Monitor, leaf,        true ); // in support of ExplicitGCInvokesConcurrent
   }
@@ -278,7 +275,9 @@
   def(ProfileVM_lock               , Monitor, special,   false); // used for profiling of the VMThread
   def(CompileThread_lock           , Monitor, nonleaf+5,   false );
   def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
-
+  if (WhiteBoxAPI) {
+    def(Compilation_lock           , Monitor, leaf,        false );
+  }
 #ifdef INCLUDE_TRACE
   def(JfrMsg_lock                  , Monitor, leaf,        true);
   def(JfrBuffer_lock               , Mutex,   leaf,        true);
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -66,7 +66,6 @@
                                                  // fore- & background GC threads.
 extern Monitor* STS_lock;                        // used for joining/leaving SuspendibleThreadSet.
 extern Monitor* SLT_lock;                        // used in CMS GC for acquiring PLL
-extern Monitor* iCMS_lock;                       // CMS incremental mode start/stop notification
 extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
 extern Monitor* CMark_lock;                      // used for concurrent mark thread coordination
 extern Mutex*   CMRegionStack_lock;              // used for protecting accesses to the CM region stack
@@ -91,6 +90,7 @@
 extern Mutex*   Compile_lock;                    // a lock held when Compilation is updating code (used to block CodeCache traversal, CHA updates, etc)
 extern Monitor* MethodCompileQueue_lock;         // a lock held when method compilations are enqueued, dequeued
 extern Monitor* CompileThread_lock;              // a lock held by compile threads during compilation system initialization
+extern Monitor* Compilation_lock;                // a lock used to pause compilation
 extern Mutex*   CompileTaskAlloc_lock;           // a lock held when CompileTasks are allocated
 extern Mutex*   CompileStatistics_lock;          // a lock held when updating compilation statistics
 extern Mutex*   MultiArray_lock;                 // a lock used to guard allocation of multi-dim arrays
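
Compilation_lock is meant to pair with the WhiteBox::compilation_locked flag set in whitebox.cpp: a compiler thread is expected to wait on the monitor while the flag is up, until unlockCompilation() clears it and notifies. That waiting side is not part of these hunks, so the following is only a hedged sketch of the presumed protocol, with std::mutex and std::condition_variable standing in for the VM monitor:

    #include <condition_variable>
    #include <mutex>

    std::mutex              compilation_mutex;  // stands in for Compilation_lock
    std::condition_variable compilation_cv;
    bool compilation_locked = false;            // WhiteBox::compilation_locked

    // Analogue of WB_LockCompilation: pause future compilations.
    void lock_compilation() {
      std::lock_guard<std::mutex> g(compilation_mutex);
      compilation_locked = true;
    }

    // Analogue of WB_UnlockCompilation: resume and wake waiting compiler threads.
    void unlock_compilation() {
      std::lock_guard<std::mutex> g(compilation_mutex);
      compilation_locked = false;
      compilation_cv.notify_all();
    }

    // Presumed check on the compiler-thread side before taking a compile task.
    void wait_while_compilation_locked() {
      std::unique_lock<std::mutex> g(compilation_mutex);
      compilation_cv.wait(g, [] { return !compilation_locked; });
    }

    int main() {
      lock_compilation();
      unlock_compilation();
      wait_while_compilation_locked();  // returns immediately once unlocked
      return 0;
    }
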
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -228,6 +228,20 @@
   static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible); }
   static int Spinner_offset_in_bytes()     { return offset_of(ObjectMonitor, _Spinner); }
 
+  // ObjectMonitor references can be ORed with markOopDesc::monitor_value
+  // as part of the ObjectMonitor tagging mechanism. When we combine an
+  // ObjectMonitor reference with an offset, we need to remove the tag
+  // value in order to generate the proper address.
+  //
+  // We can either adjust the ObjectMonitor reference and then add the
+  // offset or we can adjust the offset that is added to the ObjectMonitor
+  // reference. The latter avoids an AGI (Address Generation Interlock)
+  // stall so the helper macro adjusts the offset value that is returned
+  // to the ObjectMonitor reference manipulation code:
+  //
+  #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
+    ((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
+
   // Eventually we'll make provisions for multiple callbacks, but
   // now one will suffice.
   static int (*SpinCallbackFunction)(intptr_t, int);
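
The macro works because adding an offset to a tagged pointer and subtracting the tag from the offset commute: (monitor | tag) + (offset - tag) == monitor + offset, so the tag never has to be stripped from the pointer itself. A self-contained sketch with an assumed tag value of 2; the real constant is markOopDesc::monitor_value from the mark-word encoding:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kMonitorTag = 2;  // assumed stand-in for markOopDesc::monitor_value

    int main() {
      uintptr_t monitor = 0x1000;                // a tag-free ObjectMonitor address
      uintptr_t tagged  = monitor | kMonitorTag; // how the mark word stores it
      uintptr_t offset  = 24;                    // some field offset in the monitor

      // Adjusting the offset instead of the pointer yields the same address
      // without a separate untagging step (the AGI-avoidance trick):
      assert(tagged + (offset - kMonitorTag) == monitor + offset);
      return 0;
    }
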
--- a/hotspot/src/share/vm/runtime/reflection.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/reflection.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -806,17 +806,16 @@
 
 oop Reflection::new_parameter(Handle method, int index, Symbol* sym,
                               int flags, TRAPS) {
-  Handle name;
 
-  // A null symbol here translates to the empty string
+  Handle rh = java_lang_reflect_Parameter::create(CHECK_NULL);
+
   if(NULL != sym) {
-    name = java_lang_String::create_from_symbol(sym, CHECK_NULL);
+    Handle name = java_lang_String::create_from_symbol(sym, CHECK_NULL);
+    java_lang_reflect_Parameter::set_name(rh(), name());
   } else {
-    name = java_lang_String::create_from_str("", CHECK_NULL);
+    java_lang_reflect_Parameter::set_name(rh(), NULL);
   }
 
-  Handle rh = java_lang_reflect_Parameter::create(CHECK_NULL);
-  java_lang_reflect_Parameter::set_name(rh(), name());
   java_lang_reflect_Parameter::set_modifiers(rh(), flags);
   java_lang_reflect_Parameter::set_executable(rh(), method());
   java_lang_reflect_Parameter::set_index(rh(), index);
--- a/hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -54,13 +54,17 @@
 // Simple methods are as good being compiled with C1 as C2.
 // Determine if a given method is such a case.
 bool SimpleThresholdPolicy::is_trivial(Method* method) {
-  if (method->is_accessor()) return true;
-  if (method->code() != NULL) {
-    MethodData* mdo = method->method_data();
-    if (mdo != NULL && mdo->num_loops() == 0 &&
-        (method->code_size() < 5  || (mdo->num_blocks() < 4) && (method->code_size() < 15))) {
-      return !mdo->would_profile();
-    }
+  if (method->is_accessor() ||
+      method->is_constant_getter()) {
+    return true;
+  }
+  if (method->has_loops() || method->code_size() >= 15) {
+    return false;
+  }
+  MethodData* mdo = method->method_data();
+  if (mdo != NULL && !mdo->would_profile() &&
+      (method->code_size() < 5  || (mdo->num_blocks() < 4))) {
+    return true;
   }
   return false;
 }
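
The rewritten predicate now bails out early on loops or large methods before consulting the MethodData. A condensed sketch of the decision order, with a hypothetical plain-data stand-in for Method/MethodData:

    #include <cassert>

    struct MethodInfo {
      bool is_accessor, is_constant_getter, has_loops;
      int  code_size;
      bool has_mdo, would_profile;
      int  num_blocks;
    };

    bool is_trivial(const MethodInfo& m) {
      if (m.is_accessor || m.is_constant_getter) return true;
      if (m.has_loops || m.code_size >= 15)      return false;
      if (m.has_mdo && !m.would_profile &&
          (m.code_size < 5 || m.num_blocks < 4)) return true;
      return false;
    }

    int main() {
      // Small, straight-line, non-profiling method: trivial.
      assert(is_trivial({false, false, false, 10, true, false, 3}));
      // Any loop disqualifies, regardless of size.
      assert(!is_trivial({false, false, true, 10, true, false, 3}));
      return 0;
    }
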
--- a/hotspot/src/share/vm/runtime/sweeper.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/sweeper.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -25,6 +25,8 @@
 #ifndef SHARE_VM_RUNTIME_SWEEPER_HPP
 #define SHARE_VM_RUNTIME_SWEEPER_HPP
 
+class WhiteBox;
+
 #include "utilities/ticks.hpp"
 // An NmethodSweeper is an incremental cleaner for:
 //    - cleanup inline caches
@@ -52,6 +54,8 @@
 //     nmethod's space is freed.
 
 class NMethodSweeper : public AllStatic {
+  friend class WhiteBox;
+ private:
   static long      _traversals;                   // Stack scan count, also sweep ID.
   static long      _total_nof_code_cache_sweeps;  // Total number of full sweeps of the code cache
   static long      _time_counter;                 // Virtual time used to periodically invoke sweeper
@@ -88,7 +92,6 @@
   static void handle_safepoint_request();
   static void do_stack_scanning();
   static void possibly_sweep();
-
  public:
   static long traversal_count()              { return _traversals; }
   static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp	Thu Nov 13 16:11:00 2014 -0800
@@ -527,12 +527,10 @@
   nonstatic_field(DefNewGeneration,            _next_gen,                                     Generation*)                           \
   nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                  \
   nonstatic_field(DefNewGeneration,            _age_table,                                    ageTable)                              \
-  nonstatic_field(DefNewGeneration,            _eden_space,                                   EdenSpace*)                            \
+  nonstatic_field(DefNewGeneration,            _eden_space,                                   ContiguousSpace*)                      \
   nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \
   nonstatic_field(DefNewGeneration,            _to_space,                                     ContiguousSpace*)                      \
                                                                                                                                      \
-  nonstatic_field(EdenSpace,                   _gen,                                          DefNewGeneration*)                     \
-                                                                                                                                     \
   nonstatic_field(Generation,                  _reserved,                                     MemRegion)                             \
   nonstatic_field(Generation,                  _virtual_space,                                VirtualSpace)                          \
   nonstatic_field(Generation,                  _level,                                        int)                                   \
@@ -1490,7 +1488,6 @@
   declare_toplevel_type(BitMap)                                           \
            declare_type(CompactibleSpace,             Space)              \
            declare_type(ContiguousSpace,              CompactibleSpace)   \
-           declare_type(EdenSpace,                    ContiguousSpace)    \
            declare_type(OffsetTableContigSpace,       ContiguousSpace)    \
            declare_type(TenuredSpace,                 OffsetTableContigSpace) \
   declare_toplevel_type(BarrierSet)                                       \
@@ -1532,7 +1529,6 @@
   declare_toplevel_type(CollectedHeap*)                                   \
   declare_toplevel_type(ContiguousSpace*)                                 \
   declare_toplevel_type(DefNewGeneration*)                                \
-  declare_toplevel_type(EdenSpace*)                                       \
   declare_toplevel_type(GenCollectedHeap*)                                \
   declare_toplevel_type(Generation*)                                      \
   declare_toplevel_type(GenerationSpec**)                                 \
--- a/hotspot/src/share/vm/services/mallocTracker.hpp	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/services/mallocTracker.hpp	Thu Nov 13 16:11:00 2014 -0800
@@ -243,15 +243,15 @@
   size_t           _flags     : 8;
   size_t           _pos_idx   : 16;
   size_t           _bucket_idx: 40;
-#define MAX_MALLOCSITE_TABLE_SIZE ((size_t)1 << 40)
-#define MAX_BUCKET_LENGTH         ((size_t)(1 << 16))
+#define MAX_MALLOCSITE_TABLE_SIZE right_n_bits(40)
+#define MAX_BUCKET_LENGTH         right_n_bits(16)
 #else
   size_t           _size      : 32;
   size_t           _flags     : 8;
   size_t           _pos_idx   : 8;
   size_t           _bucket_idx: 16;
-#define MAX_MALLOCSITE_TABLE_SIZE  ((size_t)(1 << 16))
-#define MAX_BUCKET_LENGTH          ((size_t)(1 << 8))
+#define MAX_MALLOCSITE_TABLE_SIZE  right_n_bits(16)
+#define MAX_BUCKET_LENGTH          right_n_bits(8)
 #endif  // _LP64
 
  public:
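
With HotSpot's usual definition right_n_bits(n) == ((1 << n) - 1), the new macros give the largest value an n-bit bitfield can actually hold, whereas the old (1 << n) constants exceeded the field's range by one. A small check, assuming 64-bit arithmetic and that definition:

    #include <cassert>
    #include <cstdint>

    // Assumed to match the style of HotSpot's globalDefinitions.hpp.
    #define right_n_bits(n) ((uint64_t(1) << (n)) - 1)

    int main() {
      // Old constants: one past what the bitfield can represent.
      assert((uint64_t(1) << 16) == right_n_bits(16) + 1);

      // New constants: exactly the maximum n-bit value.
      assert(right_n_bits(16) == 0xFFFF);
      assert(right_n_bits(40) == 0xFFFFFFFFFFULL);
      return 0;
    }
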
--- a/hotspot/src/share/vm/trace/traceEventClasses.xsl	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/src/share/vm/trace/traceEventClasses.xsl	Thu Nov 13 16:11:00 2014 -0800
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="utf-8"?>
 <!--
- Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 
  This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@
   void set_starttime(const Ticks&amp; time) {}
   void set_endtime(const Ticks&amp; time) {}
   bool should_commit() const { return false; }
+  static bool is_enabled() { return false; }
   void commit() const {}
 };
 
--- a/hotspot/test/TEST.groups	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/TEST.groups	Thu Nov 13 16:11:00 2014 -0800
@@ -174,11 +174,8 @@
   gc/g1/TestShrinkToOneRegion.java \
   gc/metaspace/G1AddMetaspaceDependency.java \
   gc/startup_warnings/TestCMS.java \
-  gc/startup_warnings/TestCMSIncrementalMode.java \
-  gc/startup_warnings/TestCMSNoIncrementalMode.java \
   gc/startup_warnings/TestDefaultMaxRAMFraction.java \
   gc/startup_warnings/TestDefNewCMS.java \
-  gc/startup_warnings/TestIncGC.java \
   gc/startup_warnings/TestParallelGC.java \
   gc/startup_warnings/TestParallelScavengeSerialOld.java \
   gc/startup_warnings/TestParNewCMS.java \
@@ -273,8 +270,6 @@
   gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \
   gc/concurrentMarkSweep/ \
   gc/startup_warnings/TestCMS.java \
-  gc/startup_warnings/TestCMSIncrementalMode.java \
-  gc/startup_warnings/TestCMSNoIncrementalMode.java \
   gc/startup_warnings/TestDefNewCMS.java \
   gc/startup_warnings/TestParNewCMS.java
 
@@ -431,7 +426,8 @@
   compiler/8005033/Test8005033.java \
   compiler/8005419/Test8005419.java \
   compiler/8005956/PolynomialRoot.java \
-  compiler/8007294/Test8007294.java
+  compiler/8007294/Test8007294.java \
+  compiler/EliminateAutoBox/UnsignedLoads.java
 
 hotspot_compiler_3 = \
   compiler/8007722/Test8007722.java \
--- a/hotspot/test/compiler/7068051/Test7068051.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/compiler/7068051/Test7068051.java	Thu Nov 13 16:11:00 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,57 +26,116 @@
  * @test
  * @bug 7068051
  * @summary SIGSEGV in PhaseIdealLoop::build_loop_late_post on T5440
+ * @library /testlibrary
  *
- * @run shell/timeout=300 Test7068051.sh
+ * @run main/othervm -showversion -Xbatch Test7068051
  */
 
-import java.io.*;
-import java.nio.*;
-import java.util.*;
-import java.util.zip.*;
+import com.oracle.java.testlibrary.JDKToolLauncher;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardCopyOption;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipFile;
 
 public class Test7068051 {
+    private static final String SELF_NAME = Test7068051.class.getSimpleName();
+    private static final String SELF_FILE_NAME = SELF_NAME + ".java";
+    private static final String JAR_NAME = "foo.jar";
+    private static final String TEST_PATH = System.getProperty("test.src");
+    private static final Path CURRENT_DIR = Paths.get(".");
+    private static final Path TEST_SOURCE_PATH = Paths.get(TEST_PATH, SELF_FILE_NAME);
 
-    public static void main (String[] args) throws Throwable {
-
-        ZipFile zf = new ZipFile(args[0]);
+    public static void main (String[] args) throws IOException {
+        createTestJarFile();
+        System.out.println("Running test...");
 
-        Enumeration<? extends ZipEntry> entries = zf.entries();
-        ArrayList<String> names = new ArrayList<String>();
-        while (entries.hasMoreElements()) {
-            names.add(entries.nextElement().getName());
-        }
+        try (ZipFile zf = new ZipFile(JAR_NAME)) {
 
-        byte[] bytes = new byte[16];
-        for (String name : names) {
-            ZipEntry e = zf.getEntry(name);
+            Enumeration<? extends ZipEntry> entries = zf.entries();
+            ArrayList<String> names = new ArrayList<String>();
+            while (entries.hasMoreElements()) {
+                names.add(entries.nextElement().getName());
+            }
 
-            if (e.isDirectory())
-                continue;
-
-            final InputStream is = zf.getInputStream(e);
+            byte[] bytes = new byte[16];
+            for (String name : names) {
+                ZipEntry e = zf.getEntry(name);
 
-            try  {
-                while (is.read(bytes) >= 0) {
+                if (e.isDirectory()) {
+                    continue;
                 }
-                is.close();
 
-            } catch (IOException x) {
-                 System.out.println("..................................");
-                 System.out.println("          -->  is :" + is);
-                 System.out.println("          is.hash :" + is.hashCode());
-                 System.out.println();
-                 System.out.println("           e.name :" + e.getName());
-                 System.out.println("           e.hash :" + e.hashCode());
-                 System.out.println("         e.method :" + e.getMethod());
-                 System.out.println("           e.size :" + e.getSize());
-                 System.out.println("          e.csize :" + e.getCompressedSize());
+                try (final InputStream is = zf.getInputStream(e)) {
+                    try {
+                        while (is.read(bytes) >= 0) {
+                        }
+                    } catch (IOException x) {
+                        System.out.println("..................................");
+                        System.out.println("          -->  is :" + is);
+                        System.out.println("          is.hash :" + is.hashCode());
+                        System.out.println();
+                        System.out.println("           e.name :" + e.getName());
+                        System.out.println("           e.hash :" + e.hashCode());
+                        System.out.println("         e.method :" + e.getMethod());
+                        System.out.println("           e.size :" + e.getSize());
+                        System.out.println("          e.csize :" + e.getCompressedSize());
+                        System.out.println("..................................");
 
-                 x.printStackTrace();
-                 System.out.println("..................................");
-                 System.exit(97);
+                        throw new AssertionError("IOException was thrown while reading the archive. Test failed.", x);
+                    }
+                }
             }
         }
-        zf.close();
+        System.out.println("Test passed.");
+    }
+
+    private static void createTestJarFile() {
+        ArrayList<String> jarOptions = new ArrayList<>();
+
+        // jar cf foo.jar *
+        System.out.println("Creating jar file...");
+        jarOptions.add("cf");
+        jarOptions.add(JAR_NAME);
+        try {
+            for (int i = 0; i < 100; ++i) {
+                Path temp = Files.createTempFile(CURRENT_DIR, SELF_NAME, ".java");
+                Files.copy(TEST_SOURCE_PATH, temp, StandardCopyOption.REPLACE_EXISTING);
+                jarOptions.add(temp.toString());
+            }
+        } catch (IOException ex) {
+            throw new AssertionError("TESTBUG: Creating temp files failed.", ex);
+        }
+        runJar(jarOptions);
+
+        // jar -uf0 foo.jar Test7068051.java
+        System.out.println("Adding unpacked file...");
+        jarOptions.clear();
+        jarOptions.add("-uf0");
+        jarOptions.add(JAR_NAME);
+        jarOptions.add(TEST_SOURCE_PATH.toString());
+        runJar(jarOptions);
+    }
+
+    private static void runJar(List<String> params) {
+        JDKToolLauncher jar = JDKToolLauncher.create("jar");
+        for (String p : params) {
+            jar.addToolArg(p);
+        }
+        ProcessBuilder pb = new ProcessBuilder(jar.getCommand());
+        try {
+            OutputAnalyzer output = new OutputAnalyzer(pb.start());
+            output.shouldHaveExitValue(0);
+        } catch (IOException ex) {
+            throw new AssertionError("TESTBUG: jar failed.", ex);
+        }
     }
 }
--- a/hotspot/test/compiler/7068051/Test7068051.sh	Wed Jul 05 20:07:55 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-#!/bin/sh
-# 
-# Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-# 
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-# 
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-# 
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-# 
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-# 
-# 
-## some tests require path to find test source dir
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-
-set -x
-
-${COMPILEJAVA}/bin/jar xf ${COMPILEJAVA}/jre/lib/javaws.jar
-${COMPILEJAVA}/bin/jar cf foo.jar *
-cp ${TESTSRC}/Test7068051.java ./
-${COMPILEJAVA}/bin/jar -uf0 foo.jar Test7068051.java
-
-${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} -d . Test7068051.java
-
-${TESTJAVA}/bin/java ${TESTOPTS} -showversion -Xbatch Test7068051 foo.jar
-
--- a/hotspot/test/compiler/EliminateAutoBox/UnsignedLoads.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/compiler/EliminateAutoBox/UnsignedLoads.java	Thu Nov 13 16:11:00 2014 -0800
@@ -26,7 +26,7 @@
 /*
  * @test
  * @library /testlibrary
- * @run main/othervm -Xbatch -XX:+EliminateAutoBox
+ * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox
  *                   -XX:CompileOnly=::valueOf,::byteValue,::shortValue,::testUnsignedByte,::testUnsignedShort
  *                   UnsignedLoads
  */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/whitebox/AllocationCodeBlobTest.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.lang.management.MemoryPoolMXBean;
+import java.util.EnumSet;
+import java.util.ArrayList;
+
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.BlobType;
+import com.oracle.java.testlibrary.Asserts;
+
+/*
+ * @test AllocationCodeBlobTest
+ * @bug 8059624
+ * @library /testlibrary /testlibrary/whitebox
+ * @build AllocationCodeBlobTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,null::*
+ *                   -XX:-SegmentedCodeCache AllocationCodeBlobTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,null::*
+ *                   -XX:+SegmentedCodeCache AllocationCodeBlobTest
+ * @summary testing of WB::allocate/freeCodeBlob()
+ */
+public class AllocationCodeBlobTest {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final long CODE_CACHE_SIZE
+            = WHITE_BOX.getUintxVMFlag("ReservedCodeCacheSize");
+    private static final int SIZE = 1;
+
+    public static void main(String[] args) {
+        // check that the Sweeper handles dummy blobs correctly
+        new ForcedSweeper(500).start();
+        EnumSet<BlobType> blobTypes = BlobType.getAvailable();
+        for (BlobType type : blobTypes) {
+            new AllocationCodeBlobTest(type).test();
+        }
+    }
+
+    private final BlobType type;
+    private final MemoryPoolMXBean bean;
+    private AllocationCodeBlobTest(BlobType type) {
+        this.type = type;
+        bean = type.getMemoryPool();
+    }
+
+    private void test() {
+        System.out.printf("type %s%n", type);
+        long start = getUsage();
+        long addr = WHITE_BOX.allocateCodeBlob(SIZE, type.id);
+        Asserts.assertNE(0, addr, "allocation failed");
+
+        long firstAllocation = getUsage();
+        Asserts.assertLTE(start + SIZE, firstAllocation,
+                "allocation should increase memory usage: "
+                + start + " + " + SIZE + " <= " + firstAllocation);
+
+        WHITE_BOX.freeCodeBlob(addr);
+        long firstFree = getUsage();
+        Asserts.assertLTE(firstFree, firstAllocation,
+                "free shouldn't increase memory usage: "
+                + firstFree + " <= " + firstAllocation);
+
+        addr = WHITE_BOX.allocateCodeBlob(SIZE, type.id);
+        Asserts.assertNE(0L, addr, "allocation failed");
+
+        long secondAllocation = getUsage();
+        Asserts.assertEQ(firstAllocation, secondAllocation);
+
+        WHITE_BOX.freeCodeBlob(addr);
+        System.out.println("allocating till possible...");
+        ArrayList<Long> blobs = new ArrayList<>();
+        int size = (int) (CODE_CACHE_SIZE >> 7);
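+        // allocate ~1/128th-of-the-cache blobs until the heap refuses, then
+        // free them all; ForcedSweeper keeps sweeping concurrently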
+        while ((addr = WHITE_BOX.allocateCodeBlob(size, type.id)) != 0) {
+            blobs.add(addr);
+        }
+        for (Long blob : blobs) {
+            WHITE_BOX.freeCodeBlob(blob);
+        }
+    }
+
+    private long getUsage() {
+        return bean.getUsage().getUsed();
+    }
+
+    private static class ForcedSweeper extends Thread {
+        private final int millis;
+        public ForcedSweeper(int millis) {
+            super("ForcedSweeper");
+            setDaemon(true);
+            this.millis = millis;
+        }
+        public void run() {
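+            // periodically force an nmethod sweep; the allocation loop runs
+            // concurrently, so the sweeper has to cope with the dummy blobs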
+            try {
+                while (true) {
+                    WHITE_BOX.forceNMethodSweep();
+                    Thread.sleep(millis);
+                }
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new Error(e);
+            }
+        }
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/whitebox/GetCodeHeapEntriesTest.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+import java.util.Arrays;
+import java.util.EnumSet;
+
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.CodeBlob;
+import sun.hotspot.code.BlobType;
+import com.oracle.java.testlibrary.Asserts;
+
+/*
+ * @test GetCodeHeapEntriesTest
+ * @bug 8059624
+ * @library /testlibrary /testlibrary/whitebox
+ * @build GetCodeHeapEntriesTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:-SegmentedCodeCache
+ *                   GetCodeHeapEntriesTest
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:+WhiteBoxAPI -XX:+SegmentedCodeCache
+ *                   GetCodeHeapEntriesTest
+ * @summary testing of WB::getCodeHeapEntries()
+ */
+public class GetCodeHeapEntriesTest {
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+    private static final int SIZE = 1024;
+    private static final String DUMMY_NAME = "WB::DummyBlob";
+
+    public static void main(String[] args) {
+        EnumSet<BlobType> blobTypes = BlobType.getAvailable();
+        for (BlobType type : blobTypes) {
+            new GetCodeHeapEntriesTest(type).test();
+        }
+    }
+
+    private final BlobType type;
+    private GetCodeHeapEntriesTest(BlobType type) {
+        this.type = type;
+    }
+
+    private void test() {
+        System.out.printf("type %s%n", type);
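+        // WB-allocated dummy blobs are expected to carry the name DUMMY_NAME
+        // ("WB::DummyBlob"); filter() below matches on exactly that name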
+        long addr = WHITE_BOX.allocateCodeBlob(SIZE, type.id);
+        Asserts.assertNE(0L, addr, "allocation failed");
+        CodeBlob[] blobs = CodeBlob.getCodeBlobs(type);
+        Asserts.assertNotNull(blobs);
+        CodeBlob blob = Arrays.stream(blobs)
+                              .filter(GetCodeHeapEntriesTest::filter)
+                              .findAny()
+                              .orElse(null);
+        Asserts.assertNotNull(blob, "couldn't find the allocated blob");
+        Asserts.assertEQ(blob.code_blob_type, type);
+        Asserts.assertGTE(blob.size, SIZE);
+
+        WHITE_BOX.freeCodeBlob(addr);
+        blobs = CodeBlob.getCodeBlobs(type);
+        long count = Arrays.stream(blobs)
+                           .filter(GetCodeHeapEntriesTest::filter)
+                           .count();
+        Asserts.assertEQ(0L, count);
+    }
+
+    private static boolean filter(CodeBlob blob) {
+        if (blob == null) {
+            return false;
+        }
+        return DUMMY_NAME.equals(blob.name);
+    }
+}
--- a/hotspot/test/compiler/whitebox/GetNMethodTest.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/compiler/whitebox/GetNMethodTest.java	Thu Nov 13 16:11:00 2014 -0800
@@ -22,7 +22,9 @@
  *
  */
 
+import sun.hotspot.code.BlobType;
 import sun.hotspot.code.NMethod;
+import com.oracle.java.testlibrary.Asserts;
 
 /*
  * @test GetNMethodTest
@@ -52,21 +54,46 @@
 
         compile();
         checkCompiled();
+
         NMethod nmethod = NMethod.get(method, testCase.isOsr());
         if (IS_VERBOSE) {
             System.out.println("nmethod = " + nmethod);
         }
-        if (nmethod == null) {
-            throw new RuntimeException("nmethod of compiled method is null");
+        Asserts.assertNotNull(nmethod,
+                "nmethod of compiled method is null");
+        Asserts.assertNotNull(nmethod.insts,
+                "nmethod.insts of compiled method is null");
+        Asserts.assertGT(nmethod.insts.length, 0,
+                "compiled method's instructions is empty");
+        Asserts.assertNotNull(nmethod.code_blob_type, "blob type is null");
+        if (WHITE_BOX.getBooleanVMFlag("SegmentedCodeCache")) {
+            Asserts.assertNE(nmethod.code_blob_type, BlobType.All);
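+            // comp levels 1 and 4 go to the non-profiled heap, levels 2 and 3
+            // to the profiled one (cf. BlobType)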
+            switch (nmethod.comp_level) {
+            case 1:
+            case 4:
+                checkBlobType(nmethod, BlobType.MethodNonProfiled);
+                break;
+            case 2:
+            case 3:
+                checkBlobType(nmethod, BlobType.MethodProfiled);
+                break;
+            default:
+                throw new Error("unexpected comp level " + nmethod);
+            }
+        } else {
+            Asserts.assertEQ(nmethod.code_blob_type, BlobType.All);
         }
-        if (nmethod.insts.length == 0) {
-            throw new RuntimeException("compiled method's instructions is empty");
-        }
+
         deoptimize();
         checkNotCompiled();
         nmethod = NMethod.get(method, testCase.isOsr());
-        if (nmethod != null) {
-            throw new RuntimeException("nmethod of non-compiled method isn't null");
-        }
+        Asserts.assertNull(nmethod,
+                "nmethod of non-compiled method isn't null");
+    }
+
+    private void checkBlobType(NMethod nmethod, BlobType expectedType) {
+        Asserts.assertEQ(nmethod.code_blob_type, expectedType,
+                String.format("blob_type[%s] for comp_level %d isn't %s",
+                        nmethod.code_blob_type, nmethod.comp_level, expectedType));
     }
 }
--- a/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/compiler/whitebox/IsMethodCompilableTest.java	Thu Nov 13 16:11:00 2014 -0800
@@ -29,7 +29,7 @@
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
  * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
- * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
+ * @run main/othervm/timeout=2400 -Xbootclasspath/a:. -Xmixed -XX:-TieredCompilation -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:PerMethodRecompilationCutoff=3 -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* IsMethodCompilableTest
  * @summary testing of WB::isMethodCompilable()
  * @author igor.ignatyev@oracle.com
  */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/whitebox/LockCompilationTest.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test LockCompilationTest
+ * @bug 8059624
+ * @library /testlibrary /testlibrary/whitebox
+ * @build LockCompilationTest
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ *                              sun.hotspot.WhiteBox$WhiteBoxPermission
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,SimpleTestCase$Helper::* LockCompilationTest
+ * @summary testing of WB::lock/unlockCompilation()
+ */
+
+import com.oracle.java.testlibrary.Asserts;
+
+public class LockCompilationTest extends CompilerWhiteBoxTest {
+    public static void main(String[] args) throws Exception {
+        CompilerWhiteBoxTest.main(LockCompilationTest::new, args);
+    }
+
+    private LockCompilationTest(TestCase testCase) {
+        super(testCase);
+        // to prevent inlining of #method
+        WHITE_BOX.testSetDontInlineMethod(method, true);
+    }
+
+    protected void test() throws Exception {
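+        // while compilation is locked, a compile request must stay queued and
+        // the method must remain uncompiled; after unlocking, the queue drains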
+        checkNotCompiled();
+
+        System.out.println("locking compilation");
+        WHITE_BOX.lockCompilation();
+
+        try {
+            System.out.println("trying to compile");
+            compile();
+            // to check if it works correctly w/ safepoints
+            System.out.println("going to safepoint");
+            WHITE_BOX.fullGC();
+            waitBackgroundCompilation();
+            Asserts.assertTrue(
+                    WHITE_BOX.isMethodQueuedForCompilation(method),
+                    method + " must be in queue");
+            Asserts.assertFalse(
+                    WHITE_BOX.isMethodCompiled(method, false),
+                    method + " must be not compiled");
+            Asserts.assertEQ(
+                    WHITE_BOX.getMethodCompilationLevel(method, false), 0,
+                    method + " comp_level must be == 0");
+            Asserts.assertFalse(
+                    WHITE_BOX.isMethodCompiled(method, true),
+                    method + " must be not osr_compiled");
+            Asserts.assertEQ(
+                    WHITE_BOX.getMethodCompilationLevel(method, true), 0,
+                    method + " osr_comp_level must be == 0");
+        } finally {
+            System.out.println("unlocking compilation");
+            WHITE_BOX.unlockCompilation();
+        }
+        waitBackgroundCompilation();
+        Asserts.assertFalse(
+                WHITE_BOX.isMethodQueuedForCompilation(method),
+                method + " must not be in queue");
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/TestNUMAPageSize.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test TestNUMAPageSize
+ * @summary Make sure that starting up with NUMA support does not cause problems.
+ * @bug 8061467
+ * @key gc
+ * @key regression
+ * @run main/othervm -Xmx8M -XX:+UseNUMA TestNUMAPageSize
+ */
+
+public class TestNUMAPageSize {
+  public static void main(String[] args) throws Exception {
+    // nothing to do
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/concurrentMarkSweep/DisableResizePLAB.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,44 @@
+/*
+* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+*
+* This code is free software; you can redistribute it and/or modify it
+* under the terms of the GNU General Public License version 2 only, as
+* published by the Free Software Foundation.
+*
+* This code is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+* version 2 for more details (a copy is included in the LICENSE file that
+* accompanied this code).
+*
+* You should have received a copy of the GNU General Public License version
+* 2 along with this work; if not, write to the Free Software Foundation,
+* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+*
+* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+* or visit www.oracle.com if you need additional information or have any
+* questions.
+*/
+
+/*
+ * @test DisableResizePLAB
+ * @key gc
+ * @bug 8060467
+ * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
+ * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
+ * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -XX:+PrintGCDetails DisableResizePLAB
+ */
+
+public class DisableResizePLAB {
+    public static void main(String[] args) throws Exception {
+        Object[] garbage = new Object[1_000];
+        for (int i = 0; i < garbage.length; i++) {
+            garbage[i] = new byte[0];
+        }
+        long startTime = System.currentTimeMillis();
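+        // allocate steadily for ~10 seconds so collections run with the
+        // fixed-size PLABs that -XX:-ResizePLAB forces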
+        while (System.currentTimeMillis() - startTime < 10_000) {
+            Object o = new byte[1024];
+        }
+    }
+}
--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java	Thu Nov 13 16:11:00 2014 -0800
@@ -279,8 +279,7 @@
             "-XX:\\+UseConcMarkSweepGC",
             "-XX:\\+UseParallelOldGC",
             "-XX:\\+UseParNewGC",
-            "-Xconcgc",
-            "-Xincgc"
+            "-Xconcgc"
         };
     }
 }
--- a/hotspot/test/gc/startup_warnings/TestCMSIncrementalMode.java	Wed Jul 05 20:07:55 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-
-/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*/
-
-/*
-* @test TestCMSIncrementalMode
-* @key gc
-* @bug 8006398
-* @summary Test that the deprecated CMSIncrementalMode print a warning message
-* @library /testlibrary
-*/
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestCMSIncrementalMode {
-
-  public static void main(String args[]) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:+CMSIncrementalMode", "-version");
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-
-}
--- a/hotspot/test/gc/startup_warnings/TestCMSNoIncrementalMode.java	Wed Jul 05 20:07:55 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*/
-
-/*
-* @test TestCMSNoIncrementalMode
-* @key gc
-* @bug 8006398
-* @summary Test that CMS with incremental mode turned off does not print a warning message
-* @library /testlibrary
-*/
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestCMSNoIncrementalMode {
-
-  public static void main(String args[]) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version");
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldNotContain("deprecated");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-
-}
--- a/hotspot/test/gc/startup_warnings/TestIncGC.java	Wed Jul 05 20:07:55 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*/
-
-/*
-* @test TestIncGC
-* @key gc
-* @bug 8006398
-* @summary Test that the deprecated -Xincgc print a warning message
-* @library /testlibrary
-*/
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-
-public class TestIncGC {
-
-  public static void main(String args[]) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xincgc", "-version");
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-
-}
--- a/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java	Thu Nov 13 16:11:00 2014 -0800
@@ -27,7 +27,6 @@
  * @requires sun.arch.data.model == "32"
  * @key nmt jcmd stress
  * @library /testlibrary /testlibrary/whitebox
- * @ignore 8062870
  * @build MallocSiteHashOverflow
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
--- a/hotspot/test/runtime/NMT/MallocTrackingVerify.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/runtime/NMT/MallocTrackingVerify.java	Thu Nov 13 16:11:00 2014 -0800
@@ -27,7 +27,6 @@
  * @summary Test to verify correctness of malloc tracking
  * @key nmt jcmd
  * @library /testlibrary /testlibrary/whitebox
- * @ignore 8058251
  * @build MallocTrackingVerify
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocTrackingVerify
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/contended/OopMapsSameGroup.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import sun.misc.Contended;
+
+/*
+ * @test
+ * @bug     8015272
+ * @summary Test that \@Contended fields within the same group use the same oop map
+ *
+ * @run main/othervm -XX:-RestrictContended -XX:ContendedPaddingWidth=128 -Xmx128m OopMapsSameGroup
+ */
+public class OopMapsSameGroup {
+
+    public static final int COUNT = 10000;
+
+    public static void main(String[] args) throws Exception {
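+        // fill two @Contended groups with shared references, force a GC and
+        // verify every reference survived, i.e. the oop maps covered all fields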
+        Object o01 = new Object();
+        Object o02 = new Object();
+        Object o03 = new Object();
+        Object o04 = new Object();
+
+        R[] rs = new R[COUNT];
+
+        for (int i = 0; i < COUNT; i++) {
+           R r = new R();
+           r.o01 = o01;
+           r.o02 = o02;
+           r.o03 = o03;
+           r.o04 = o04;
+           rs[i] = r;
+        }
+
+        System.gc();
+
+        for (int i = 0; i < COUNT; i++) {
+           R r = rs[i];
+           if (r.o01 != o01) throw new Error("Test Error: o01");
+           if (r.o02 != o02) throw new Error("Test Error: o02");
+           if (r.o03 != o03) throw new Error("Test Error: o03");
+           if (r.o04 != o04) throw new Error("Test Error: o04");
+        }
+    }
+
+    public static class R {
+        @Contended("group1")
+        Object o01;
+
+        @Contended("group1")
+        Object o02;
+
+        @Contended("group2")
+        Object o03;
+
+        @Contended("group2")
+        Object o04;
+    }
+
+}
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Nov 13 16:11:00 2014 -0800
@@ -143,8 +143,14 @@
   }
   public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci);
   public native void    clearMethodState(Executable method);
+  public native void    lockCompilation();
+  public native void    unlockCompilation();
   public native int     getMethodEntryBci(Executable method);
   public native Object[] getNMethod(Executable method, boolean isOsr);
+  public native long    allocateCodeBlob(int size, int type);
+  public native void    freeCodeBlob(long addr);
+  public native void    forceNMethodSweep();
+  public native Object[] getCodeHeapEntries(int type);
 
   // Interned strings
   public native boolean isInStringTable(String str);
@@ -207,4 +213,13 @@
                        .findAny()
                        .orElse(null);
   }
+  public native int getOffsetForName0(String name);
+  public int getOffsetForName(String name) throws Exception {
+    int offset = getOffsetForName0(name);
+    if (offset == -1) {
+      throw new RuntimeException(name + " not found");
+    }
+    return offset;
+  }
+
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/code/BlobType.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.hotspot.code;
+
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryPoolMXBean;
+import java.util.EnumSet;
+
+import sun.hotspot.WhiteBox;
+
+public enum BlobType {
+    // Execution level 1 and 4 (non-profiled) nmethods (including native nmethods)
+    MethodNonProfiled(0, "CodeHeap 'non-profiled nmethods'"),
+    // Execution level 2 and 3 (profiled) nmethods
+    MethodProfiled(1, "CodeHeap 'profiled nmethods'"),
+    // Non-nmethods like Buffers, Adapters and Runtime Stubs
+    NonNMethod(2, "CodeHeap 'non-nmethods'"),
+    // All types (No code cache segmentation)
+    All(3, "CodeCache");
+
+    public final int id;
+    private final String beanName;
+
+    private BlobType(int id, String beanName) {
+        this.id = id;
+        this.beanName = beanName;
+    }
+
+    public MemoryPoolMXBean getMemoryPool() {
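+        // look up the memory pool backing this code heap; returns null if the
+        // current VM configuration doesn't expose such a pool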
+        for (MemoryPoolMXBean bean : ManagementFactory.getMemoryPoolMXBeans()) {
+            String name = bean.getName();
+            if (beanName.equals(name)) {
+                return bean;
+            }
+        }
+        return null;
+    }
+    public static EnumSet<BlobType> getAvailable() {
+        WhiteBox whiteBox = WhiteBox.getWhiteBox();
+        if (!whiteBox.getBooleanVMFlag("SegmentedCodeCache")) {
+            // only All in a non-segmented world
+            return EnumSet.of(All);
+        }
+        if (System.getProperty("java.vm.info").startsWith("interpreted ")) {
+            // only NonNMethod for -Xint
+            return EnumSet.of(NonNMethod);
+        }
+
+        EnumSet<BlobType> result = EnumSet.complementOf(EnumSet.of(All));
+        if (!whiteBox.getBooleanVMFlag("TieredCompilation")
+                || whiteBox.getIntxVMFlag("TieredStopAtLevel") <= 1) {
+            // there is no MethodProfiled in a non-tiered world or with pure C1
+            result.remove(MethodProfiled);
+        }
+        return result;
+    }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/code/CodeBlob.java	Thu Nov 13 16:11:00 2014 -0800
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.hotspot.code;
+
+import sun.hotspot.WhiteBox;
+
+public class CodeBlob {
+  private static final WhiteBox WB = WhiteBox.getWhiteBox();
+  public static CodeBlob[] getCodeBlobs(BlobType type) {
+    Object[] obj = WB.getCodeHeapEntries(type.id);
+    if (obj == null) {
+      return null;
+    }
+    CodeBlob[] result = new CodeBlob[obj.length];
+    for (int i = 0, n = result.length; i < n; ++i) {
+      result[i] = new CodeBlob((Object[]) obj[i]);
+    }
+    return result;
+  }
+  protected CodeBlob(Object[] obj) {
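+    // entry layout returned by WB.getCodeHeapEntries: {name, size, blob type id}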
+    assert obj.length == 3;
+    name = (String) obj[0];
+    size = (Integer) obj[1];
+    code_blob_type = BlobType.values()[(Integer) obj[2]];
+    assert code_blob_type.id == (Integer) obj[2];
+  }
+  public final String name;
+  public final int size;
+  public final BlobType code_blob_type;
+
+  @Override
+  public String toString() {
+    return "CodeBlob{"
+        + "name=" + name
+        + ", size=" + size
+        + ", code_blob_type=" + code_blob_type
+        + '}';
+  }
+}
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/code/NMethod.java	Wed Jul 05 20:07:55 2017 +0200
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/code/NMethod.java	Thu Nov 13 16:11:00 2014 -0800
@@ -27,28 +27,30 @@
 import java.lang.reflect.Executable;
 import sun.hotspot.WhiteBox;
 
-public class NMethod {
+public class NMethod extends CodeBlob {
   private static final WhiteBox wb = WhiteBox.getWhiteBox();
   public static NMethod get(Executable method, boolean isOsr) {
     Object[] obj = wb.getNMethod(method, isOsr);
     return obj == null ? null : new NMethod(obj);
   }
   private NMethod(Object[] obj) {
-    assert obj.length == 3;
-    comp_level = (Integer) obj[0];
-    insts = (byte[]) obj[1];
-    compile_id = (Integer) obj[2];
+    super((Object[])obj[0]);
+    assert obj.length == 4;
+    comp_level = (Integer) obj[1];
+    insts = (byte[]) obj[2];
+    compile_id = (Integer) obj[3];
   }
-  public byte[] insts;
-  public int comp_level;
-  public int compile_id;
+  public final byte[] insts;
+  public final int comp_level;
+  public final int compile_id;
 
   @Override
   public String toString() {
-    return "NMethod{" +
-        "insts=" + insts +
-        ", comp_level=" + comp_level +
-        ", compile_id=" + compile_id +
-        '}';
+    return "NMethod{"
+        + super.toString()
+        + ", insts=" + insts
+        + ", comp_level=" + comp_level
+        + ", compile_id=" + compile_id
+        + '}';
   }
 }