Merge aefimov-dns-client-branch
author    aefimov
date      Thu, 14 Nov 2019 13:50:03 +0000
branch    aefimov-dns-client-branch
changeset 59099 fcdb8e7ead8f
parent 58984 15e026239a6c (current diff)
parent 59075 355f4f42dda5 (diff)
child 59100 b92aac38b046
Merge

files:
src/hotspot/share/gc/cms/adaptiveFreeList.cpp
src/hotspot/share/gc/cms/adaptiveFreeList.hpp
src/hotspot/share/gc/cms/allocationStats.cpp
src/hotspot/share/gc/cms/allocationStats.hpp
src/hotspot/share/gc/cms/cmsArguments.cpp
src/hotspot/share/gc/cms/cmsArguments.hpp
src/hotspot/share/gc/cms/cmsCardTable.cpp
src/hotspot/share/gc/cms/cmsCardTable.hpp
src/hotspot/share/gc/cms/cmsGCStats.cpp
src/hotspot/share/gc/cms/cmsGCStats.hpp
src/hotspot/share/gc/cms/cmsHeap.cpp
src/hotspot/share/gc/cms/cmsHeap.hpp
src/hotspot/share/gc/cms/cmsHeap.inline.hpp
src/hotspot/share/gc/cms/cmsLockVerifier.cpp
src/hotspot/share/gc/cms/cmsLockVerifier.hpp
src/hotspot/share/gc/cms/cmsOopClosures.hpp
src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp
src/hotspot/share/gc/cms/cmsVMOperations.cpp
src/hotspot/share/gc/cms/cmsVMOperations.hpp
src/hotspot/share/gc/cms/cms_globals.hpp
src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp
src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp
src/hotspot/share/gc/cms/compactibleFreeListSpace.inline.hpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp
src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp
src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp
src/hotspot/share/gc/cms/concurrentMarkSweepThread.hpp
src/hotspot/share/gc/cms/freeChunk.cpp
src/hotspot/share/gc/cms/freeChunk.hpp
src/hotspot/share/gc/cms/gSpaceCounters.cpp
src/hotspot/share/gc/cms/gSpaceCounters.hpp
src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp
src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.hpp
src/hotspot/share/gc/cms/parNewGeneration.cpp
src/hotspot/share/gc/cms/parNewGeneration.hpp
src/hotspot/share/gc/cms/parNewGeneration.inline.hpp
src/hotspot/share/gc/cms/parOopClosures.hpp
src/hotspot/share/gc/cms/parOopClosures.inline.hpp
src/hotspot/share/gc/cms/promotionInfo.cpp
src/hotspot/share/gc/cms/promotionInfo.hpp
src/hotspot/share/gc/cms/promotionInfo.inline.hpp
src/hotspot/share/gc/cms/vmStructs_cms.hpp
src/hotspot/share/gc/cms/yieldingWorkgroup.cpp
src/hotspot/share/gc/cms/yieldingWorkgroup.hpp
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/AdaptiveFreeList.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSBitMap.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSCollector.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CompactibleFreeListSpace.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ConcurrentMarkSweepGeneration.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/LinearAllocBlock.java
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ParNewGeneration.java
test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMS.java
test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMSCondMark.java
test/hotspot/jtreg/gc/TestMemoryInitializationWithCMS.java
test/hotspot/jtreg/gc/arguments/TestCMSHeapSizeFlags.java
test/hotspot/jtreg/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java
test/hotspot/jtreg/gc/cms/DisableResizePLAB.java
test/hotspot/jtreg/gc/cms/GuardShrinkWarning.java
test/hotspot/jtreg/gc/cms/TestBubbleUpRef.java
test/hotspot/jtreg/gc/cms/TestCMSScavengeBeforeRemark.java
test/hotspot/jtreg/gc/cms/TestCriticalPriority.java
test/hotspot/jtreg/gc/cms/TestMBeanCMS.java
test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlCMS.java
test/hotspot/jtreg/gc/metaspace/TestMetaspaceCMSCancel.java
test/hotspot/jtreg/gc/startup_warnings/TestCMS.java
test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithCMS.java
test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithCMS.java
test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithCMS.java
test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithCMS.java
test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithParNew.java
test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithCMS.java
test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithParNew.java
test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithCMS.java
test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorGCCMSTest.java
test/jdk/java/lang/management/MemoryMXBean/MemoryManagementConcMarkSweepGC.sh
test/jdk/java/util/Arrays/ParallelSorting.java
test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSConcurrent.java
test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSMarkSweep.java
test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSConcurrent.java
test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSMarkSweep.java
test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithParNew.java
test/jdk/jdk/jfr/event/gc/collection/TestYoungGarbageCollectionEventWithParNew.java
test/jdk/jdk/jfr/event/gc/detailed/TestCMSConcurrentModeFailureEvent.java
test/jdk/jdk/jfr/event/gc/detailed/TestPromotionFailedEventWithParNew.java
test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventConcurrentCMS.java
test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventParNewCMS.java
test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSConcurrent.java
test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSMarkSweep.java
test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSConcurrent.java
test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSMarkSweep.java
test/jdk/jdk/jfr/event/gc/stacktrace/TestConcMarkSweepAllocationPendingStackTrace.java
test/jdk/jdk/jfr/event/gc/stacktrace/TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace.java
test/jdk/jdk/jfr/event/gc/stacktrace/TestParNewAllocationPendingStackTrace.java
test/jdk/jdk/jfr/event/oldobject/TestCMS.java
test/langtools/tools/javac/diags/examples/RestrictedTypeNotAllowedPreview.java
--- a/.hgtags	Fri Nov 08 14:54:17 2019 +0000
+++ b/.hgtags	Thu Nov 14 13:50:03 2019 +0000
@@ -595,3 +595,4 @@
 54ffb15c48399dd59922ee22bb592d815307e77c jdk-14+20
 c16ac7a2eba4e73cb4f7ee9294dd647860eebff0 jdk-14+21
 83810b7d12e7ff761ad3dd91f323a22dad96f108 jdk-14+22
+15936b142f86731afa4b1a2c0fe4a01e806c4944 jdk-14+23
--- a/make/autoconf/hotspot.m4	Fri Nov 08 14:54:17 2019 +0000
+++ b/make/autoconf/hotspot.m4	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -25,11 +25,11 @@
 
 # All valid JVM features, regardless of platform
 VALID_JVM_FEATURES="compiler1 compiler2 zero minimal dtrace jvmti jvmci \
-    graal vm-structs jni-check services management cmsgc epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
+    graal vm-structs jni-check services management epsilongc g1gc parallelgc serialgc shenandoahgc zgc nmt cds \
     static-build link-time-opt aot jfr"
 
 # Deprecated JVM features (these are ignored, but with a warning)
-DEPRECATED_JVM_FEATURES="trace"
+DEPRECATED_JVM_FEATURES="trace cmsgc"
 
 # All valid JVM variants
 VALID_JVM_VARIANTS="server client minimal core zero custom"
@@ -326,10 +326,6 @@
     AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1'])
   fi
 
-  if HOTSPOT_CHECK_JVM_FEATURE(cmsgc) && ! HOTSPOT_CHECK_JVM_FEATURE(serialgc); then
-    AC_MSG_ERROR([Specified JVM feature 'cmsgc' requires feature 'serialgc'])
-  fi
-
   # Enable JFR by default, except for Zero, linux-sparcv9 and on minimal.
   if ! HOTSPOT_CHECK_JVM_VARIANT(zero); then
     if test "x$OPENJDK_TARGET_OS" != xaix; then
@@ -351,7 +347,8 @@
   # Only enable ZGC on supported platforms
   AC_MSG_CHECKING([if zgc can be built])
   if (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xx86_64") || \
-     (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64"); then
+     (test "x$OPENJDK_TARGET_OS" = "xlinux" && test "x$OPENJDK_TARGET_CPU" = "xaarch64") ||
+     (test "x$OPENJDK_TARGET_OS" = "xmacosx" && test "x$OPENJDK_TARGET_CPU" = "xx86_64"); then
     AC_MSG_RESULT([yes])
   else
     DISABLED_JVM_FEATURES="$DISABLED_JVM_FEATURES zgc"
@@ -490,7 +487,7 @@
   fi
 
   # All variants but minimal (and custom) get these features
-  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES cmsgc g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
+  NON_MINIMAL_FEATURES="$NON_MINIMAL_FEATURES g1gc parallelgc serialgc epsilongc shenandoahgc jni-check jvmti management nmt services vm-structs zgc"
 
   # Disable CDS on AIX.
   if test "x$OPENJDK_TARGET_OS" = "xaix"; then
--- a/make/autoconf/toolchain.m4	Fri Nov 08 14:54:17 2019 +0000
+++ b/make/autoconf/toolchain.m4	Thu Nov 14 13:50:03 2019 +0000
@@ -481,7 +481,7 @@
     COMPILER_VERSION_STRING=`$ECHO $COMPILER_VERSION_OUTPUT | \
         $SED -e 's/ *Copyright .*//'`
     COMPILER_VERSION_NUMBER=`$ECHO $COMPILER_VERSION_OUTPUT | \
-        $SED -e 's/^.* \(@<:@1-9@:>@\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
+        $SED -e 's/^.* \(@<:@1-9@:>@<:@0-9@:>@*\.@<:@0-9.@:>@*\)@<:@^0-9.@:>@.*$/\1/'`
   elif test  "x$TOOLCHAIN_TYPE" = xclang; then
     # clang --version output typically looks like
     #    Apple LLVM version 5.0 (clang-500.2.79) (based on LLVM 3.3svn)
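For reference (decoded: the autoconf quadrigraphs @<:@ and @:>@ stand for [ and ]), the capture group changes from \([1-9]\.[0-9.]*\) to \([1-9][0-9]*\.[0-9.]*\), so compiler major versions with more than one digit parse correctly. A minimal standalone C++ check of the same effect — a sketch using ECMAScript regex syntax rather than sed's, on a hypothetical version line:

#include <cassert>
#include <regex>
#include <string>

int main() {
  // Old capture: single-digit major version only.
  const std::regex old_re(R"(^.* ([1-9]\.[0-9.]*)[^0-9.].*$)");
  // New capture: one or more digits in the major version.
  const std::regex new_re(R"(^.* ([1-9][0-9]*\.[0-9.]*)[^0-9.].*$)");

  const std::string line = "gcc (GCC) 10.1.0 ";  // hypothetical version output
  std::smatch m;
  assert(!std::regex_match(line, m, old_re));                    // old regex fails on 10.x
  assert(std::regex_match(line, m, new_re) && m[1] == "10.1.0"); // new regex matches
  return 0;
}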
--- a/make/hotspot/lib/JvmDtraceObjects.gmk	Fri Nov 08 14:54:17 2019 +0000
+++ b/make/hotspot/lib/JvmDtraceObjects.gmk	Thu Nov 14 13:50:03 2019 +0000
@@ -79,12 +79,6 @@
         vmThread.o \
     )
 
-    ifeq ($(call check-jvm-feature, cmsgc), true)
-      DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
-          cmsVMOperations.o \
-      )
-    endif
-
     ifeq ($(call check-jvm-feature, parallelgc), true)
       DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
           psVMOperations.o \
--- a/make/hotspot/lib/JvmFeatures.gmk	Fri Nov 08 14:54:17 2019 +0000
+++ b/make/hotspot/lib/JvmFeatures.gmk	Thu Nov 14 13:50:03 2019 +0000
@@ -138,11 +138,6 @@
       aotLoader.cpp compiledIC_aot.cpp
 endif
 
-ifneq ($(call check-jvm-feature, cmsgc), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_CMSGC=0
-  JVM_EXCLUDE_PATTERNS += gc/cms
-endif
-
 ifneq ($(call check-jvm-feature, g1gc), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
   JVM_EXCLUDE_PATTERNS += gc/g1
--- a/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/make/hotspot/src/native/dtrace/generateJvmOffsets.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,7 +63,6 @@
  */
 
 #pragma weak tty
-#pragma weak CMSExpAvgFactor
 
 #if defined(i386) || defined(__i386) || defined(__amd64)
 #pragma weak noreg
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu Nov 14 13:50:03 2019 +0000
@@ -1192,9 +1192,6 @@
   // predicate controlling translation of CompareAndSwapX
   bool needs_acquiring_load_exclusive(const Node *load);
 
-  // predicate controlling translation of StoreCM
-  bool unnecessary_storestore(const Node *storecm);
-
   // predicate controlling addressing modes
   bool size_fits_all_mem_uses(AddPNode* addp, int shift);
 %}
@@ -1583,29 +1580,6 @@
   return true;
 }
 
-// predicate controlling translation of StoreCM
-//
-// returns true if a StoreStore must precede the card write otherwise
-// false
-
-bool unnecessary_storestore(const Node *storecm)
-{
-  assert(storecm->Opcode()  == Op_StoreCM, "expecting a StoreCM");
-
-  // we need to generate a dmb ishst between an object put and the
-  // associated card mark when we are using CMS without conditional
-  // card marking
-
-  if (UseConcMarkSweepGC && !UseCondCardMark) {
-    return false;
-  }
-
-  // a storestore is unnecesary in all other cases
-
-  return true;
-}
-
-
 #define __ _masm.
 
 // advance declarations for helper functions to convert register
@@ -7220,7 +7194,6 @@
 instruct storeimmCM0(immI0 zero, memory mem)
 %{
   match(Set mem (StoreCM mem zero));
-  predicate(unnecessary_storestore(n));
 
   ins_cost(INSN_COST);
   format %{ "storestore (elided)\n\t"
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -840,6 +840,7 @@
           __ sub(arr_size, arr_size, t1);  // body length
           __ add(t1, t1, obj);       // body start
           __ initialize_body(t1, arr_size, 0, t2);
+          __ membar(Assembler::StoreStore);
           __ verify_oop(obj);
 
           __ ret(lr);
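The barrier added after initialize_body orders the stores that zero the new array's body before the array reference can become visible to other threads. A standalone C++ analogy of that publication ordering — an illustrative sketch, not HotSpot code (a release fence is at least as strong as the StoreStore the stub emits):

#include <atomic>
#include <cstring>

struct Array { long length; char body[64]; };

std::atomic<Array*> g_published{nullptr};

// Mirror of the C1 fast path above: initialize the body, fence, then publish.
void allocate_and_publish(Array* raw) {
  raw->length = sizeof(raw->body);
  std::memset(raw->body, 0, sizeof(raw->body));        // initialize_body
  std::atomic_thread_fence(std::memory_order_release); // membar(StoreStore) analogue
  g_published.store(raw, std::memory_order_relaxed);   // the reference escapes here
}

int main() {
  static Array a;
  allocate_and_publish(&a);
  return g_published.load() == &a ? 0 : 1;
}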
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -22,8 +22,8 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
 #include "gc/shenandoah/shenandoahForwarding.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
@@ -254,32 +254,16 @@
     dst = rscratch1;
   }
 
-  RegSet to_save_r1 = RegSet::of(r1);
-  // If outgoing register is r1, we can clobber it
-  if (result_dst != r1) {
-    __ push(to_save_r1, sp);
-  }
+  // Save r0 and r1, unless it is an output register
+  RegSet to_save = RegSet::of(r0, r1) - result_dst;
+  __ push(to_save, sp);
   __ lea(r1, load_addr);
-
-  RegSet to_save_r0 = RegSet::of(r0);
-  if (dst != r0) {
-    __ push(to_save_r0, sp);
-    __ mov(r0, dst);
-  }
+  __ mov(r0, dst);
 
   __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));
 
-  if (result_dst != r0) {
-    __ mov(result_dst, r0);
-  }
-
-  if (dst != r0) {
-    __ pop(to_save_r0, sp);
-  }
-
-  if (result_dst != r1) {
-    __ pop(to_save_r1, sp);
-  }
+  __ mov(result_dst, r0);
+  __ pop(to_save, sp);
 
   __ bind(done);
   __ leave();
@@ -370,8 +354,8 @@
     return;
   }
 
-  // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
-  if (ShenandoahLoadRefBarrier) {
+  // 2: load a reference from src location and apply LRB if needed
+  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
     Register result_dst = dst;
 
     // Preserve src location for LRB
@@ -382,9 +366,7 @@
 
     BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
 
-    // Native barrier is for concurrent root processing
-    bool in_native = (decorators & IN_NATIVE) != 0;
-    if (in_native && ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
+    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
       load_reference_barrier_native(masm, dst, src);
     } else {
       load_reference_barrier(masm, dst, src);
@@ -398,25 +380,17 @@
     BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
   }
 
-  // 3: apply keep-alive barrier if ShenandoahKeepAliveBarrier is set
-  if (ShenandoahKeepAliveBarrier) {
-    bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
-    bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
-    bool on_reference = on_weak || on_phantom;
-    bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
-    bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0 || is_traversal_mode;
-
-    if (on_reference && keep_alive) {
-      __ enter();
-      satb_write_barrier_pre(masm /* masm */,
-                             noreg /* obj */,
-                             dst /* pre_val */,
-                             rthread /* thread */,
-                             tmp1 /* tmp */,
-                             true /* tosca_live */,
-                             true /* expand_call */);
-      __ leave();
-    }
+  // 3: apply keep-alive barrier if needed
+  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
+    __ enter();
+    satb_write_barrier_pre(masm /* masm */,
+                           noreg /* obj */,
+                           dst /* pre_val */,
+                           rthread /* thread */,
+                           tmp1 /* tmp */,
+                           true /* tosca_live */,
+                           true /* expand_call */);
+    __ leave();
   }
 }
 
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -64,9 +64,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/arm/globals_arm.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/arm/globals_arm.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -63,9 +63,6 @@
 
 define_pd_global(bool,  PreserveFramePointer,     false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker,    16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 0);
 
 // No performance work done here yet.
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -322,7 +322,7 @@
 void PatchingStub::emit_code(LIR_Assembler* ce) {
   // copy original code here
   assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
-         "not enough room for call");
+         "not enough room for call, need %d", _bytes_to_copy);
   assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");
 
   Label call_patch;
@@ -340,7 +340,7 @@
     __ load_const(_obj, addrlit, R0);
     DEBUG_ONLY( compare_with_patch_site(__ code_section()->start() + being_initialized_entry, _pc_start, _bytes_to_copy); )
   } else {
-    // Make a copy the code which is going to be patched.
+    // Make a copy of the code which is going to be patched.
     for (int i = 0; i < _bytes_to_copy; i++) {
       address ptr = (address)(_pc_start + i);
       int a_byte = (*ptr) & 0xFF;
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -743,10 +743,11 @@
           if (UseCompressedOops && !wide) {
             // Encoding done in caller
             __ stw(from_reg->as_register(), offset, base);
+            __ verify_coop(from_reg->as_register(), FILE_AND_LINE);
           } else {
             __ std(from_reg->as_register(), offset, base);
+            __ verify_oop(from_reg->as_register(), FILE_AND_LINE);
           }
-          __ verify_oop(from_reg->as_register());
           break;
         }
       case T_FLOAT : __ stfs(from_reg->as_float_reg(), offset, base); break;
@@ -783,10 +784,11 @@
         if (UseCompressedOops && !wide) {
           // Encoding done in caller.
           __ stwx(from_reg->as_register(), base, disp);
+          __ verify_coop(from_reg->as_register(), FILE_AND_LINE); // kills R0
         } else {
           __ stdx(from_reg->as_register(), base, disp);
+          __ verify_oop(from_reg->as_register(), FILE_AND_LINE); // kills R0
         }
-        __ verify_oop(from_reg->as_register()); // kills R0
         break;
       }
     case T_FLOAT : __ stfsx(from_reg->as_float_reg(), base, disp); break;
@@ -831,7 +833,7 @@
           } else {
             __ ld(to_reg->as_register(), offset, base);
           }
-          __ verify_oop(to_reg->as_register());
+          __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
           break;
         }
       case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
@@ -862,7 +864,7 @@
         } else {
           __ ldx(to_reg->as_register(), base, disp);
         }
-        __ verify_oop(to_reg->as_register());
+        __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
         break;
       }
     case T_FLOAT:  __ lfsx(to_reg->as_float_reg() , base, disp); break;
@@ -1141,7 +1143,7 @@
   }
 
   if (addr->base()->type() == T_OBJECT) {
-    __ verify_oop(src);
+    __ verify_oop(src, FILE_AND_LINE);
   }
 
   PatchingStub* patch = NULL;
@@ -1238,7 +1240,7 @@
     ShouldNotReachHere();
   }
   if (is_reference_type(to_reg->type())) {
-    __ verify_oop(to_reg->as_register());
+    __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
   }
 }
 
@@ -1265,7 +1267,7 @@
   }
 
   if (addr->base()->is_oop_register()) {
-    __ verify_oop(src);
+    __ verify_oop(src, FILE_AND_LINE);
   }
 
   PatchingStub* patch = NULL;
@@ -2321,7 +2323,7 @@
                      *op->stub()->entry());
 
   __ bind(*op->stub()->continuation());
-  __ verify_oop(op->obj()->as_register());
+  __ verify_oop(op->obj()->as_register(), FILE_AND_LINE);
 }
 
 
@@ -2546,7 +2548,7 @@
     Register Rtmp1 = op->tmp3()->as_register();
     bool should_profile = op->should_profile();
 
-    __ verify_oop(value);
+    __ verify_oop(value, FILE_AND_LINE);
     CodeStub* stub = op->stub();
     // Check if it needs to be profiled.
     ciMethodData* md = NULL;
@@ -3099,7 +3101,7 @@
   assert(do_null || do_update, "why are we here?");
   assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
 
-  __ verify_oop(obj);
+  __ verify_oop(obj, FILE_AND_LINE);
 
   if (do_null) {
     if (!TypeEntries::was_null_seen(current_klass)) {
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -44,7 +44,7 @@
   const Register temp_reg = R12_scratch2;
   Label Lmiss;
 
-  verify_oop(receiver);
+  verify_oop(receiver, FILE_AND_LINE);
   MacroAssembler::null_check(receiver, oopDesc::klass_offset_in_bytes(), &Lmiss);
   load_klass(temp_reg, receiver);
 
@@ -100,7 +100,7 @@
   // Load object header.
   ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);
 
-  verify_oop(Roop);
+  verify_oop(Roop, FILE_AND_LINE);
 
   // Save object being locked into the BasicObjectLock...
   std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
@@ -157,7 +157,7 @@
   if (UseBiasedLocking) {
     // Load the object out of the BasicObjectLock.
     ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
-    verify_oop(Roop);
+    verify_oop(Roop, FILE_AND_LINE);
     biased_locking_exit(CCR0, Roop, R0, done);
   }
   // Test first if it is a fast recursive unlock.
@@ -167,7 +167,7 @@
   if (!UseBiasedLocking) {
     // Load object.
     ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
-    verify_oop(Roop);
+    verify_oop(Roop, FILE_AND_LINE);
   }
 
   // Check if it is still a light weight lock, this is true if we see
@@ -316,7 +316,7 @@
 //         relocInfo::runtime_call_type);
   }
 
-  verify_oop(obj);
+  verify_oop(obj, FILE_AND_LINE);
 }
 
 
@@ -383,7 +383,7 @@
     //     relocInfo::runtime_call_type);
   }
 
-  verify_oop(obj);
+  verify_oop(obj, FILE_AND_LINE);
 }
 
 
@@ -399,8 +399,7 @@
   bne(CCR0, not_null);
   stop("non-null oop required");
   bind(not_null);
-  if (!VerifyOops) return;
-  verify_oop(r);
+  verify_oop(r, FILE_AND_LINE);
 }
 
 #endif // PRODUCT
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -335,12 +335,12 @@
   __ ld(value, 0, tmp1);      // Resolve (untagged) jobject.
 
   __ beq(CCR0, not_weak);     // Test for jweak tag.
-  __ verify_oop(value);
+  __ verify_oop(value, FILE_AND_LINE);
   g1_write_barrier_pre(masm, IN_NATIVE | ON_PHANTOM_OOP_REF,
                        noreg, noreg, value,
                        tmp1, tmp2, needs_frame);
   __ bind(not_weak);
-  __ verify_oop(value);
+  __ verify_oop(value, FILE_AND_LINE);
   __ bind(done);
 }
 
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -113,7 +113,7 @@
   __ clrrdi(tmp1, value, JNIHandles::weak_tag_size);
   __ ld(value, 0, tmp1);      // Resolve (untagged) jobject.
 
-  __ verify_oop(value);
+  __ verify_oop(value, FILE_AND_LINE);
   __ bind(done);
 }
 
--- a/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -30,6 +30,10 @@
 #error "CC_INTERP is no longer supported. Removed in change 8145117."
 #endif
 
+#ifndef FILE_AND_LINE
+#define FILE_AND_LINE __FILE__ ":" XSTR(__LINE__)
+#endif
+
 // Size of PPC Instructions
 const int BytesPerInstWord = 4;
 
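The FILE_AND_LINE macro above is what the many verify_oop(reg, FILE_AND_LINE) call sites in this changeset pass as their message, tagging each verification with its source location. A minimal standalone illustration, assuming the usual two-level stringification helpers (HotSpot defines XSTR/STR in its utility headers; they are reproduced here so the sketch compiles on its own):

#include <cstdio>

#define STR(x)  #x
#define XSTR(x) STR(x)  // two-level expansion so __LINE__ is expanded before stringizing
#define FILE_AND_LINE __FILE__ ":" XSTR(__LINE__)

int main() {
  puts(FILE_AND_LINE);  // prints something like "demo.cpp:9"
  return 0;
}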
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -67,9 +67,6 @@
 
 define_pd_global(bool, PreserveFramePointer,  false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // Default max size of CMS young gen, per GC worker thread.
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -2313,7 +2313,7 @@
 }
 
 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
-  if (state == atos) { MacroAssembler::verify_oop(reg); }
+  if (state == atos) { MacroAssembler::verify_oop(reg, FILE_AND_LINE); }
 }
 
 // Local helper function for the verify_oop_or_return_address macro.
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -3120,7 +3120,7 @@
   li(R0, 0);
   std(R0, in_bytes(JavaThread::vm_result_offset()), R16_thread);
 
-  verify_oop(oop_result);
+  verify_oop(oop_result, FILE_AND_LINE);
 }
 
 void MacroAssembler::get_vm_result_2(Register metadata_result) {
@@ -4917,6 +4917,13 @@
   }
 }
 
+void MacroAssembler::verify_coop(Register coop, const char* msg) {
+  if (!VerifyOops) { return; }
+  if (UseCompressedOops) { decode_heap_oop(coop); }
+  verify_oop(coop, msg);
+  if (UseCompressedOops) { encode_heap_oop(coop, coop); }
+}
+
 // READ: oop. KILL: R0. Volatile floats perhaps.
 void MacroAssembler::verify_oop(Register oop, const char* msg) {
   if (!VerifyOops) {
@@ -4926,6 +4933,9 @@
   address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
   const Register tmp = R11; // Will be preserved.
   const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
+
+  BLOCK_COMMENT("verify_oop {");
+
   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
 
   mr_if_needed(R4_ARG2, oop);
@@ -4942,6 +4952,8 @@
   pop_frame();
   restore_LR_CR(tmp);
   restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
+
+  BLOCK_COMMENT("} verify_oop");
 }
 
 void MacroAssembler::verify_oop_addr(RegisterOrConstant offs, Register base, const char* msg) {
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -914,6 +914,9 @@
   // Verify R16_thread contents.
   void verify_thread();
 
+  // Calls verify_oop. If UseCompressedOops is on, decodes the oop.
+  // Preserves reg.
+  void verify_coop(Register reg, const char*);
   // Emit code to verify that reg contains a valid oop if +VerifyOops is set.
   void verify_oop(Register reg, const char* s = "broken oop");
   void verify_oop_addr(RegisterOrConstant offs, Register base, const char* s = "contains broken oop");
--- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -77,7 +77,7 @@
   Klass* klass = SystemDictionary::well_known_klass(klass_id);
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_klass {");
-  __ verify_oop(obj_reg);
+  __ verify_oop(obj_reg, FILE_AND_LINE);
   __ cmpdi(CCR0, obj_reg, 0);
   __ beq(CCR0, L_bad);
   __ load_klass(temp_reg, obj_reg);
@@ -172,16 +172,16 @@
   assert(method_temp == R19_method, "required register for loading method");
 
   // Load the invoker, as MH -> MH.form -> LF.vmentry
-  __ verify_oop(recv);
+  __ verify_oop(recv, FILE_AND_LINE);
   __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv,
                    temp2, noreg, false, IS_NOT_NULL);
-  __ verify_oop(method_temp);
+  __ verify_oop(method_temp, FILE_AND_LINE);
   __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp,
                    temp2, noreg, false, IS_NOT_NULL);
-  __ verify_oop(method_temp);
+  __ verify_oop(method_temp, FILE_AND_LINE);
   __ load_heap_oop(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes()), method_temp,
                    temp2, noreg, false, IS_NOT_NULL);
-  __ verify_oop(method_temp);
+  __ verify_oop(method_temp, FILE_AND_LINE);
   __ ld(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes()), method_temp);
 
   if (VerifyMethodHandles && !for_compiler_entry) {
@@ -318,7 +318,7 @@
 
     Register temp1_recv_klass = temp1;
     if (iid != vmIntrinsics::_linkToStatic) {
-      __ verify_oop(receiver_reg);
+      __ verify_oop(receiver_reg, FILE_AND_LINE);
       if (iid == vmIntrinsics::_linkToSpecial) {
         // Don't actually load the klass; just null-check the receiver.
         __ null_check_throw(receiver_reg, -1, temp1,
--- a/src/hotspot/cpu/ppc/ppc.ad	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/ppc.ad	Thu Nov 14 13:50:03 2019 +0000
@@ -6928,25 +6928,6 @@
   ins_pipe(pipe_class_memory);
 %}
 
-// Card-mark for CMS garbage collection.
-// This cardmark does an optimization so that it must not always
-// do a releasing store. For this, it needs the constant address of
-// CMSCollectorCardTableBarrierSetBSExt::_requires_release.
-// This constant address is split off here by expand so we can use
-// adlc / matcher functionality to load it from the constant section.
-instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
-  match(Set mem (StoreCM mem zero));
-  predicate(UseConcMarkSweepGC);
-
-  expand %{
-    immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %}
-    iRegLdst releaseFieldAddress;
-    flagsReg crx;
-    loadConL_Ex(releaseFieldAddress, baseImm);
-    storeCM_CMS(mem, releaseFieldAddress, crx);
-  %}
-%}
-
 instruct storeCM_G1(memory mem, immI_0 zero) %{
   match(Set mem (StoreCM mem zero));
   predicate(UseG1GC);
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1742,9 +1742,9 @@
         assert(r->is_valid(), "bad oop arg");
         if (r->is_stack()) {
           __ ld(temp_reg, reg2offset(r), R1_SP);
-          __ verify_oop(temp_reg);
+          __ verify_oop(temp_reg, FILE_AND_LINE);
         } else {
-          __ verify_oop(r->as_Register());
+          __ verify_oop(r->as_Register(), FILE_AND_LINE);
         }
       }
     }
@@ -2107,7 +2107,7 @@
 
   __ cmpdi(CCR0, R3_ARG1, 0);
   __ beq(CCR0, ic_miss);
-  __ verify_oop(R3_ARG1);
+  __ verify_oop(R3_ARG1, FILE_AND_LINE);
   __ load_klass(receiver_klass, R3_ARG1);
 
   __ cmpd(CCR0, receiver_klass, ic);
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -440,7 +440,6 @@
     StubCodeMark mark(this, "StubRoutines", "forward_exception");
     address start = __ pc();
 
-#if !defined(PRODUCT)
     if (VerifyOops) {
       // Get pending exception oop.
       __ ld(R3_ARG1,
@@ -456,7 +455,6 @@
       }
       __ verify_oop(R3_ARG1, "StubRoutines::forward exception: not an oop");
     }
-#endif
 
     // Save LR/CR and copy exception pc (LR) into R4_ARG2.
     __ save_LR_CR(R4_ARG2);
@@ -702,9 +700,9 @@
 #if !defined(PRODUCT)
   // Wrapper which calls oopDesc::is_oop_or_null()
   // Only called by MacroAssembler::verify_oop
-  static void verify_oop_helper(const char* message, oop o) {
+  static void verify_oop_helper(const char* message, oopDesc* o) {
     if (!oopDesc::is_oop_or_null(o)) {
-      fatal("%s", message);
+      fatal("%s. oop: " PTR_FORMAT, message, p2i(o));
     }
     ++ StubRoutines::_verify_oop_count;
   }
@@ -725,7 +723,6 @@
     return start;
   }
 
-
   // -XX:+OptimizeFill : convert fill/copy loops into intrinsic
   //
  // The code is implemented (ported from sparc) as we believe it benefits JVM98, however
--- a/src/hotspot/cpu/s390/globals_s390.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/s390/globals_s390.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -69,9 +69,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // Default max size of CMS young gen, per GC worker thread.
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -66,9 +66,9 @@
 
 
 void C1_MacroAssembler::verified_entry() {
-  if (C1Breakpoint) breakpoint_trap();
-  // build frame
-  verify_FPU(0, "method_entry");
+  if (C1Breakpoint) {
+    breakpoint_trap();
+  }
 }
 
 
--- a/src/hotspot/cpu/sparc/globals_sparc.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/sparc/globals_sparc.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -74,9 +74,6 @@
 
 define_pd_global(bool, PreserveFramePointer, false);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -91,7 +91,6 @@
 // dispatch.
 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
   assert_not_delayed();
-  verify_FPU(1, state);
   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
   jmp( IdispatchAddress, 0 );
   if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
@@ -264,7 +263,6 @@
 // dispatch value in Lbyte_code and increment Lbcp
 
 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify, bool generate_poll) {
-  verify_FPU(1, state);
   // %%%%% maybe implement +VerifyActivationFrameSize here
   //verify_thread(); //too slow; we will just verify on method entry & exit
   if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
@@ -2545,11 +2543,6 @@
 }
 
 
-void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
-  if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
-}
-
-
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
                                                         int increment, Address mask_addr,
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -321,7 +321,7 @@
   // Debugging
   void interp_verify_oop(Register reg, TosState state, const char * file, int line);    // only if +VerifyOops && state == atos
   void verify_oop_or_return_address(Register reg, Register rtmp); // for astore
-  void verify_FPU(int stack_depth, TosState state = ftos); // only if +VerifyFPU  && (state == ftos || state == dtos)
+  void verify_FPU(int stack_depth, TosState state = ftos) {}      // No-op.
 
   // support for JVMTI/Dtrace
   typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1130,21 +1130,6 @@
   }
 }
 
-
-// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
-void MacroAssembler::push_fTOS() {
-  // %%%%%% need to implement this
-}
-
-// pops double TOS element from CPU stack and pushes on FPU stack
-void MacroAssembler::pop_fTOS() {
-  // %%%%%% need to implement this
-}
-
-void MacroAssembler::empty_FPU_stack() {
-  // %%%%%% need to implement this
-}
-
 void MacroAssembler::_verify_oop(Register reg, const char* msg, const char * file, int line) {
   // plausibility check for oops
   if (!VerifyOops) return;
@@ -2826,47 +2811,6 @@
    bind(done);
 }
 
-
-
-void MacroAssembler::print_CPU_state() {
-  // %%%%% need to implement this
-}
-
-void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
-  // %%%%% need to implement this
-}
-
-void MacroAssembler::push_IU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_IU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::push_FPU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_FPU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::push_CPU_state() {
-  // %%%%% need to implement this
-}
-
-
-void MacroAssembler::pop_CPU_state() {
-  // %%%%% need to implement this
-}
-
-
-
 void MacroAssembler::verify_tlab() {
 #ifdef ASSERT
   if (UseTLAB && VerifyOops) {
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -649,10 +649,6 @@
   inline void callr( Register s1, Register s2 );
   inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
 
-  // Emits nothing on V8
-  inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
-  inline void iprefetch( Label& L);
-
   inline void tst( Register s );
 
   inline void ret(  bool trace = false );
@@ -1056,23 +1052,6 @@
   // check_and_forward_exception to handle exceptions when it is safe
   void check_and_forward_exception(Register scratch_reg);
 
-  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
-  void push_fTOS();
-
-  // pops double TOS element from CPU stack and pushes on FPU stack
-  void pop_fTOS();
-
-  void empty_FPU_stack();
-
-  void push_IU_state();
-  void pop_IU_state();
-
-  void push_FPU_state();
-  void pop_FPU_state();
-
-  void push_CPU_state();
-  void pop_CPU_state();
-
   // Returns the byte size of the instructions generated by decode_klass_not_null().
   static int instr_size_for_decode_klass_not_null();
 
@@ -1092,15 +1071,11 @@
 #define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
 #define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)
 
-        // only if +VerifyOops
-  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
-        // only if +VerifyFPU
   void stop(const char* msg);                          // prints msg, dumps registers and stops execution
   void warn(const char* msg);                          // prints msg, but don't stop
   void untested(const char* what = "");
   void unimplemented(const char* what = "");
   void should_not_reach_here()                   { stop("should not reach here"); }
-  void print_CPU_state();
 
   // oops in code
   AddressLiteral allocate_oop_address(jobject obj);                          // allocate_index
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -278,13 +278,6 @@
 inline void MacroAssembler::callr( Register s1, Register s2 ) { jmpl( s1, s2, O7 ); }
 inline void MacroAssembler::callr( Register s1, int simm13a, RelocationHolder const& rspec ) { jmpl( s1, simm13a, O7, rspec); }
 
-// prefetch instruction
-inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
-  Assembler::bp( never, true, xcc, pt, d, rt );
-    Assembler::bp( never, true, xcc, pt, d, rt );
-}
-inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
-
 inline void MacroAssembler::tst( Register s ) { orcc( G0, s, G0 ); }
 
 inline void MacroAssembler::ret( bool trace ) {
--- a/src/hotspot/cpu/x86/assembler_x86.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -9163,6 +9163,26 @@
   emit_int8((unsigned char)(0xD0 | encode));
 }
 
+void Assembler::btsq(Address dst, int imm8) {
+  assert(isByte(imm8), "not a byte");
+  InstructionMark im(this);
+  prefixq(dst);
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0xBA);
+  emit_operand(rbp /* 5 */, dst, 1);
+  emit_int8(imm8);
+}
+
+void Assembler::btrq(Address dst, int imm8) {
+  assert(isByte(imm8), "not a byte");
+  InstructionMark im(this);
+  prefixq(dst);
+  emit_int8((unsigned char)0x0F);
+  emit_int8((unsigned char)0xBA);
+  emit_operand(rsi /* 6 */, dst, 1);
+  emit_int8(imm8);
+}
+
 void Assembler::orq(Address dst, int32_t imm32) {
   InstructionMark im(this);
   prefixq(dst);
--- a/src/hotspot/cpu/x86/assembler_x86.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/x86/assembler_x86.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1592,6 +1592,9 @@
 
 #ifdef _LP64
   void notq(Register dst);
+
+  void btsq(Address dst, int imm8);
+  void btrq(Address dst, int imm8);
 #endif
 
   void orl(Address dst, int32_t imm32);
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -22,8 +22,8 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
 #include "gc/shenandoah/shenandoahForwarding.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
@@ -467,8 +467,10 @@
     return;
   }
 
-  // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
-  if (ShenandoahLoadRefBarrier) {
+  assert((decorators & ON_UNKNOWN_OOP_REF) == 0, "Not expected");
+
+  // 2: load a reference from src location and apply LRB if needed
+  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
     Register result_dst = dst;
     bool use_tmp1_for_dst = false;
 
@@ -487,9 +489,7 @@
 
     BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
 
-    // Native barrier is for concurrent root processing
-    bool in_native = (decorators & IN_NATIVE) != 0;
-    if (in_native && ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
+    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
       load_reference_barrier_native(masm, dst, src);
     } else {
       load_reference_barrier(masm, dst, src);
@@ -509,28 +509,20 @@
     BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
   }
 
-  // 3: apply keep-alive barrier if ShenandoahKeepAliveBarrier is set
-  if (ShenandoahKeepAliveBarrier) {
-    bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
-    bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
-    bool on_reference = on_weak || on_phantom;
-    bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
-    bool keep_alive = ((decorators & AS_NO_KEEPALIVE) == 0) || is_traversal_mode;
-
-    if (on_reference && keep_alive) {
-      const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
-      assert_different_registers(dst, tmp1, tmp_thread);
-      NOT_LP64(__ get_thread(thread));
-      // Generate the SATB pre-barrier code to log the value of
-      // the referent field in an SATB buffer.
-      shenandoah_write_barrier_pre(masm /* masm */,
-                                   noreg /* obj */,
-                                   dst /* pre_val */,
-                                   thread /* thread */,
-                                   tmp1 /* tmp */,
-                                   true /* tosca_live */,
-                                   true /* expand_call */);
-    }
+  // 3: apply keep-alive barrier if needed
+  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
+    const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
+    assert_different_registers(dst, tmp1, tmp_thread);
+    NOT_LP64(__ get_thread(thread));
+    // Generate the SATB pre-barrier code to log the value of
+    // the referent field in an SATB buffer.
+    shenandoah_write_barrier_pre(masm /* masm */,
+                                 noreg /* obj */,
+                                 dst /* pre_val */,
+                                 thread /* thread */,
+                                 tmp1 /* tmp */,
+                                 true /* tosca_live */,
+                                 true /* expand_call */);
   }
 }
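Here and in the matching aarch64 hunk, the open-coded decorator checks move behind ShenandoahBarrierSet::need_keep_alive_barrier, and the removed `in_native && can_do_concurrent_roots()` test behind use_load_reference_barrier_native. A self-contained reconstruction of the keep-alive predicate from the removed lines — illustrative only; the flag values are stand-ins and the real implementation lives in ShenandoahBarrierSet:

#include <cstdint>

typedef uint64_t DecoratorSet;                    // stand-in for HotSpot's type
const DecoratorSet ON_WEAK_OOP_REF    = 1u << 0;  // stand-in bit values, not the
const DecoratorSet ON_PHANTOM_OOP_REF = 1u << 1;  // real decorator constants
const DecoratorSet AS_NO_KEEPALIVE    = 1u << 2;

// Reconstructed from the open-coded checks removed in both ports.
bool need_keep_alive_barrier(DecoratorSet decorators,
                             bool keep_alive_barrier_enabled,  // ShenandoahKeepAliveBarrier
                             bool traversal_mode) {            // heap()->is_traversal_mode()
  if (!keep_alive_barrier_enabled) {
    return false;
  }
  const bool on_reference = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
  const bool keep_alive   = (decorators & AS_NO_KEEPALIVE) == 0 || traversal_mode;
  return on_reference && keep_alive;
}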
 
--- a/src/hotspot/cpu/x86/globals_x86.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/x86/globals_x86.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -81,9 +81,6 @@
 define_pd_global(bool, RewriteBytecodes,     true);
 define_pd_global(bool, RewriteFrequentPairs, true);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 111);
 
 define_pd_global(bool, CompactStrings, true);
--- a/src/hotspot/cpu/x86/x86_64.ad	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/x86/x86_64.ad	Thu Nov 14 13:50:03 2019 +0000
@@ -3116,6 +3116,26 @@
   interface(CONST_INTER);
 %}
 
+operand immL_Pow2()
+%{
+  predicate(is_power_of_2_long(n->get_long()));
+  match(ConL);
+
+  op_cost(15);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
+operand immL_NotPow2()
+%{
+  predicate(is_power_of_2_long(~n->get_long()));
+  match(ConL);
+
+  op_cost(15);
+  format %{ %}
+  interface(CONST_INTER);
+%}
+
 // Long Immediate zero
 operand immL0()
 %{
@@ -9841,6 +9861,23 @@
   ins_pipe(ialu_mem_imm);
 %}
 
+instruct btrL_mem_imm(memory dst, immL_NotPow2 con, rFlagsReg cr)
+%{
+  // con should be a pure 64-bit immediate given that not(con) is a power of 2
+  // because AND/OR works well enough for 8/32-bit values.
+  predicate(log2_long(~n->in(3)->in(2)->get_long()) > 30);
+
+  match(Set dst (StoreL dst (AndL (LoadL dst) con)));
+  effect(KILL cr);
+
+  ins_cost(125);
+  format %{ "btrq    $dst, log2(not($con))\t# long" %}
+  ins_encode %{
+    __ btrq($dst$$Address, log2_long(~$con$$constant));
+  %}
+  ins_pipe(ialu_mem_imm);
+%}
+
 // BMI1 instructions
 instruct andnL_rReg_rReg_mem(rRegL dst, rRegL src1, memory src2, immL_M1 minus_1, rFlagsReg cr) %{
   match(Set dst (AndL (XorL src1 minus_1) (LoadL src2)));
@@ -10034,6 +10071,23 @@
   ins_pipe(ialu_mem_imm);
 %}
 
+instruct btsL_mem_imm(memory dst, immL_Pow2 con, rFlagsReg cr)
+%{
+  // con should be a pure 64-bit power of 2 immediate
+  // because AND/OR works well enough for 8/32-bit values.
+  predicate(log2_long(n->in(3)->in(2)->get_long()) > 31);
+
+  match(Set dst (StoreL dst (OrL (LoadL dst) con)));
+  effect(KILL cr);
+
+  ins_cost(125);
+  format %{ "btsq    $dst, log2($con)\t# long" %}
+  ins_encode %{
+    __ btsq($dst$$Address, log2_long($con$$constant));
+  %}
+  ins_pipe(ialu_mem_imm);
+%}
+
 // Xor Instructions
 // Xor Register with Register
 instruct xorL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
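The btrL_mem_imm/btsL_mem_imm rules above replace a load/AND-or-OR/store with a 64-bit immediate by a single memory bit-test instruction; the predicates (log2 of the bit above 30 or 31) restrict them to high bit positions, where the immediate no longer fits the ordinary 32-bit AND/OR encodings. A standalone C++ check of the bit semantics the new instructions implement — illustrative only:

#include <cassert>
#include <cstdint>

// *dst |=  (1L << bit)  is what  btsq [dst], bit  performs.
static void bts(uint64_t* dst, int bit) { *dst |=  (uint64_t{1} << bit); }
// *dst &= ~(1L << bit)  is what  btrq [dst], bit  performs.
static void btr(uint64_t* dst, int bit) { *dst &= ~(uint64_t{1} << bit); }

int main() {
  uint64_t v = 0;
  bts(&v, 40);                        // a bit index above 31, as the matcher requires
  assert(v == (uint64_t{1} << 40));
  btr(&v, 40);
  assert(v == 0);
  return 0;
}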
--- a/src/hotspot/cpu/zero/globals_zero.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/cpu/zero/globals_zero.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -66,9 +66,6 @@
 define_pd_global(bool,  RewriteBytecodes,     true);
 define_pd_global(bool,  RewriteFrequentPairs, true);
 
-// GC Ergo Flags
-define_pd_global(size_t, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
-
 define_pd_global(uintx, TypeProfileLevel, 0);
 
 define_pd_global(bool, PreserveFramePointer, false);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zBackingFile_bsd.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zBackingFile_bsd.hpp"
+#include "gc/z/zErrno.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "logging/log.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
+static int vm_flags_superpage() {
+  if (!ZLargePages::is_explicit()) {
+    return 0;
+  }
+
+  const int page_size_in_megabytes = ZGranuleSize >> 20;
+  return page_size_in_megabytes << VM_FLAGS_SUPERPAGE_SHIFT;
+}
+
+static ZErrno mremap(uintptr_t from_addr, uintptr_t to_addr, size_t size) {
+  mach_vm_address_t remap_addr = to_addr;
+  vm_prot_t remap_cur_prot;
+  vm_prot_t remap_max_prot;
+
+  // Remap memory to an additional location
+  const kern_return_t res = mach_vm_remap(mach_task_self(),
+                                          &remap_addr,
+                                          size,
+                                          0 /* mask */,
+                                          VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | vm_flags_superpage(),
+                                          mach_task_self(),
+                                          from_addr,
+                                          FALSE /* copy */,
+                                          &remap_cur_prot,
+                                          &remap_max_prot,
+                                          VM_INHERIT_COPY);
+
+  return (res == KERN_SUCCESS) ? ZErrno(0) : ZErrno(EINVAL);
+}
+
+ZBackingFile::ZBackingFile() :
+    _base(0),
+    _size(0),
+    _initialized(false) {
+
+  // Reserve address space for virtual backing file
+  _base = (uintptr_t)os::reserve_memory(MaxHeapSize);
+  if (_base == 0) {
+    // Failed
+    log_error(gc)("Failed to reserve address space for virtual backing file");
+    return;
+  }
+
+  // Successfully initialized
+  _initialized = true;
+}
+
+bool ZBackingFile::is_initialized() const {
+  return _initialized;
+}
+
+size_t ZBackingFile::size() const {
+  return _size;
+}
+
+bool ZBackingFile::commit_inner(size_t offset, size_t length) {
+  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
+  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
+
+  log_trace(gc, heap)("Committing memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const uintptr_t addr = _base + offset;
+  const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    log_error(gc)("Failed to commit memory (%s)", err.to_string());
+    return false;
+  }
+
+  const size_t end = offset + length;
+  if (end > _size) {
+    // Record new virtual file size
+    _size = end;
+  }
+
+  // Success
+  return true;
+}
+
+size_t ZBackingFile::commit(size_t offset, size_t length) {
+  // Try to commit the whole region
+  if (commit_inner(offset, length)) {
+    // Success
+    return length;
+  }
+
+  // Failed, try to commit as much as possible
+  size_t start = offset;
+  size_t end = offset + length;
+
+  for (;;) {
+    length = align_down((end - start) / 2, ZGranuleSize);
+    if (length == 0) {
+      // Done, don't commit more
+      return start - offset;
+    }
+
+    if (commit_inner(start, length)) {
+      // Success, try commit more
+      start += length;
+    } else {
+      // Failed, try commit less
+      end -= length;
+    }
+  }
+}
+
+size_t ZBackingFile::uncommit(size_t offset, size_t length) {
+  assert(is_aligned(offset, os::vm_page_size()), "Invalid offset");
+  assert(is_aligned(length, os::vm_page_size()), "Invalid length");
+
+  log_trace(gc, heap)("Uncommitting memory: " SIZE_FORMAT "M-" SIZE_FORMAT "M (" SIZE_FORMAT "M)",
+                      offset / M, (offset + length) / M, length / M);
+
+  const uintptr_t start = _base + offset;
+  const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    log_error(gc)("Failed to uncommit memory (%s)", err.to_string());
+    return 0;
+  }
+
+  return length;
+}
+
+void ZBackingFile::map(uintptr_t addr, size_t size, uintptr_t offset) const {
+  const ZErrno err = mremap(_base + offset, addr, size);
+  if (err) {
+    fatal("Failed to remap memory (%s)", err.to_string());
+  }
+}
+
+void ZBackingFile::unmap(uintptr_t addr, size_t size) const {
+  // Note that we must keep the address space reservation intact and just detach
+  // the backing memory. For this reason we map a new anonymous, non-accessible
+  // and non-reserved page over the mapping instead of actually unmapping.
+  const void* const res = mmap((void*)addr, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
+  if (res == MAP_FAILED) {
+    ZErrno err;
+    fatal("Failed to map memory (%s)", err.to_string());
+  }
+}
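ZBackingFile::commit() above falls back to a halving search: it repeatedly tries to commit the granule-aligned half of the remaining range, advancing the start on success and shrinking the end on failure. A standalone sketch of the same loop under a pluggable try_commit — illustrative; kGranule stands in for ZGranuleSize (2M):

#include <cassert>
#include <cstddef>
#include <functional>

static const size_t kGranule = 2u * 1024 * 1024;   // stand-in for ZGranuleSize

static size_t align_down(size_t value, size_t alignment) {
  return value - (value % alignment);
}

// Returns how many bytes from [offset, offset + length) were committed.
size_t commit_with_backoff(size_t offset, size_t length,
                           const std::function<bool(size_t, size_t)>& try_commit) {
  if (try_commit(offset, length)) {
    return length;                                 // whole region committed
  }
  size_t start = offset;
  size_t end   = offset + length;
  for (;;) {
    const size_t half = align_down((end - start) / 2, kGranule);
    if (half == 0) {
      return start - offset;                       // nothing granule-sized left to try
    }
    if (try_commit(start, half)) {
      start += half;                               // success: try to commit more above
    } else {
      end -= half;                                 // failure: retry within a smaller range
    }
  }
}

int main() {
  const size_t limit = 3 * kGranule;               // fake backend commits below 6M only
  auto try_commit = [&](size_t off, size_t len) { return off + len <= limit; };
  assert(commit_with_backoff(0, 8 * kGranule, try_commit) == limit);
  return 0;
}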
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zBackingFile_bsd.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
+#define OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
+
+#include "memory/allocation.hpp"
+
+class ZPhysicalMemory;
+
+// On macOS, we use a virtual backing file. It is represented by a reserved virtual
+// address space, in which we commit physical memory using the mach_vm_map() API.
+// The multi-mapping API simply remaps these addresses using mach_vm_remap() into
+// the different heap views. This works as if there were a backing file; it's just
+// that the file is represented by memory mappings instead.
+
+class ZBackingFile {
+private:
+  uintptr_t _base;
+  size_t    _size;
+  bool      _initialized;
+
+  bool commit_inner(size_t offset, size_t length);
+
+public:
+  ZBackingFile();
+
+  bool is_initialized() const;
+
+  size_t size() const;
+
+  size_t commit(size_t offset, size_t length);
+  size_t uncommit(size_t offset, size_t length);
+
+  void map(uintptr_t addr, size_t size, uintptr_t offset) const;
+  void unmap(uintptr_t addr, size_t size) const;
+};
+
+#endif // OS_BSD_GC_Z_ZBACKINGFILE_BSD_HPP
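
A rough user-space sketch of the Mach calls named in the comment above, using mach_vm_allocate() as a simpler stand-in for mach_vm_map() and creating a second view with mach_vm_remap(); the protection and inheritance choices here are illustrative, not ZGC's actual arguments (macOS only):

    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const mach_vm_size_t size = 1 << 20;

      // The "backing file": committed anonymous memory in our own task.
      mach_vm_address_t backing = 0;
      kern_return_t kr = mach_vm_allocate(mach_task_self(), &backing, size,
                                          VM_FLAGS_ANYWHERE);
      assert(kr == KERN_SUCCESS);
      *(char*)(uintptr_t)backing = 42;

      // A second view of the same physical pages via mach_vm_remap().
      mach_vm_address_t view = 0;
      vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
      kr = mach_vm_remap(mach_task_self(), &view, size, 0 /* mask */,
                         VM_FLAGS_ANYWHERE, mach_task_self(), backing,
                         FALSE /* share, don't copy */, &cur, &max,
                         VM_INHERIT_NONE);
      assert(kr == KERN_SUCCESS);
      printf("backing says %d, view says %d\n",
             *(char*)(uintptr_t)backing, *(char*)(uintptr_t)view);
      return 0;
    }
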
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zLargePages_bsd.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLargePages.hpp"
+#include "runtime/globals.hpp"
+
+void ZLargePages::initialize_platform() {
+  if (UseLargePages) {
+    _state = Explicit;
+  } else {
+    _state = Disabled;
+  }
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zNUMA.hpp"
+
+void ZNUMA::initialize_platform() {
+  _enabled = false;
+}
+
+uint32_t ZNUMA::count() {
+  return 1;
+}
+
+uint32_t ZNUMA::id() {
+  return 0;
+}
+
+uint32_t ZNUMA::memory_id(uintptr_t addr) {
+  // NUMA support not enabled, assume everything belongs to node zero
+  return 0;
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLargePages.inline.hpp"
+#include "gc/z/zPhysicalMemory.inline.hpp"
+#include "gc/z/zPhysicalMemoryBacking_bsd.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/init.hpp"
+#include "runtime/os.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+
+bool ZPhysicalMemoryBacking::is_initialized() const {
+  return _file.is_initialized();
+}
+
+void ZPhysicalMemoryBacking::warn_commit_limits(size_t max) const {
+  // Does nothing
+}
+
+bool ZPhysicalMemoryBacking::supports_uncommit() {
+  assert(!is_init_completed(), "Invalid state");
+  assert(_file.size() >= ZGranuleSize, "Invalid size");
+
+  // Test if uncommit is supported by uncommitting and then re-committing a granule
+  return commit(uncommit(ZGranuleSize)) == ZGranuleSize;
+}
+
+size_t ZPhysicalMemoryBacking::commit(size_t size) {
+  size_t committed = 0;
+
+  // Fill holes in the backing file
+  while (committed < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - committed;
+    const uintptr_t start = _uncommitted.alloc_from_front_at_most(remaining, &allocated);
+    if (start == UINTPTR_MAX) {
+      // No holes to commit
+      break;
+    }
+
+    // Try commit hole
+    const size_t filled = _file.commit(start, allocated);
+    if (filled > 0) {
+      // Successful or partially successful
+      _committed.free(start, filled);
+      committed += filled;
+    }
+    if (filled < allocated) {
+      // Failed or partially failed
+      _uncommitted.free(start + filled, allocated - filled);
+      return committed;
+    }
+  }
+
+  // Expand backing file
+  if (committed < size) {
+    const size_t remaining = size - committed;
+    const uintptr_t start = _file.size();
+    const size_t expanded = _file.commit(start, remaining);
+    if (expanded > 0) {
+      // Successful or partially successful
+      _committed.free(start, expanded);
+      committed += expanded;
+    }
+  }
+
+  return committed;
+}
+
+size_t ZPhysicalMemoryBacking::uncommit(size_t size) {
+  size_t uncommitted = 0;
+
+  // Punch holes in backing file
+  while (uncommitted < size) {
+    size_t allocated = 0;
+    const size_t remaining = size - uncommitted;
+    const uintptr_t start = _committed.alloc_from_back_at_most(remaining, &allocated);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+
+    // Try punch hole
+    const size_t punched = _file.uncommit(start, allocated);
+    if (punched > 0) {
+      // Successful or partialy successful
+      _uncommitted.free(start, punched);
+      uncommitted += punched;
+    }
+    if (punched < allocated) {
+      // Failed or partially failed
+      _committed.free(start + punched, allocated - punched);
+      return uncommitted;
+    }
+  }
+
+  return uncommitted;
+}
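
Each iteration above moves a range between the _uncommitted and _committed managers and carefully hands back the tail of a partially satisfied request. A small standalone sketch of that partial-success accounting, with a toy file_commit() standing in for ZBackingFile::commit():

    #include <cstddef>
    #include <cstdio>

    // Toy stand-in: only the first 6 units of the "file" can ever be
    // committed, so requests reaching past that succeed partially.
    static size_t file_commit(size_t start, size_t length) {
      const size_t limit = 6;
      if (start >= limit) return 0;
      return (start + length <= limit) ? length : limit - start;
    }

    int main() {
      size_t committed_total = 0;
      const size_t start = 4, allocated = 4;  // hole from "_uncommitted"

      const size_t filled = file_commit(start, allocated);
      if (filled > 0) {
        committed_total += filled;            // _committed.free(start, filled)
      }
      if (filled < allocated) {
        // _uncommitted.free(start + filled, allocated - filled): hand back
        // the unsatisfied tail instead of losing track of it.
        printf("returned [%zu, %zu) to the uncommitted pool\n",
               start + filled, start + allocated);
      }
      printf("committed %zu units\n", committed_total);
      return 0;
    }
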
+
+ZPhysicalMemory ZPhysicalMemoryBacking::alloc(size_t size) {
+  assert(is_aligned(size, ZGranuleSize), "Invalid size");
+
+  ZPhysicalMemory pmem;
+
+  // Allocate segments
+  for (size_t allocated = 0; allocated < size; allocated += ZGranuleSize) {
+    const uintptr_t start = _committed.alloc_from_front(ZGranuleSize);
+    assert(start != UINTPTR_MAX, "Allocation should never fail");
+    pmem.add_segment(ZPhysicalMemorySegment(start, ZGranuleSize));
+  }
+
+  return pmem;
+}
+
+void ZPhysicalMemoryBacking::free(const ZPhysicalMemory& pmem) {
+  const size_t nsegments = pmem.nsegments();
+
+  // Free segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    _committed.free(segment.start(), segment.size());
+  }
+}
+
+void ZPhysicalMemoryBacking::pretouch_view(uintptr_t addr, size_t size) const {
+  const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size();
+  os::pretouch_memory((void*)addr, (void*)(addr + size), page_size);
+}
+
+void ZPhysicalMemoryBacking::map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const {
+  const size_t nsegments = pmem.nsegments();
+  size_t size = 0;
+
+  // Map segments
+  for (size_t i = 0; i < nsegments; i++) {
+    const ZPhysicalMemorySegment& segment = pmem.segment(i);
+    const uintptr_t segment_addr = addr + size;
+    _file.map(segment_addr, segment.size(), segment.start());
+    size += segment.size();
+  }
+
+  // Pre-touch memory
+  if (pretouch) {
+    pretouch_view(addr, size);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const {
+  _file.unmap(addr, pmem.size());
+}
+
+uintptr_t ZPhysicalMemoryBacking::nmt_address(uintptr_t offset) const {
+  // From an NMT point of view we treat the first heap view (marked0) as committed
+  return ZAddress::marked0(offset);
+}
+
+void ZPhysicalMemoryBacking::map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  if (ZVerifyViews) {
+    // Map good view
+    map_view(pmem, ZAddress::good(offset), AlwaysPreTouch);
+  } else {
+    // Map all views
+    map_view(pmem, ZAddress::marked0(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::marked1(offset), AlwaysPreTouch);
+    map_view(pmem, ZAddress::remapped(offset), AlwaysPreTouch);
+  }
+}
+
+void ZPhysicalMemoryBacking::unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  if (ZVerifyViews) {
+    // Unmap good view
+    unmap_view(pmem, ZAddress::good(offset));
+  } else {
+    // Unmap all views
+    unmap_view(pmem, ZAddress::marked0(offset));
+    unmap_view(pmem, ZAddress::marked1(offset));
+    unmap_view(pmem, ZAddress::remapped(offset));
+  }
+}
+
+void ZPhysicalMemoryBacking::debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  // Map good view
+  assert(ZVerifyViews, "Should be enabled");
+  map_view(pmem, ZAddress::good(offset), false /* pretouch */);
+}
+
+void ZPhysicalMemoryBacking::debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const {
+  // Unmap good view
+  assert(ZVerifyViews, "Should be enabled");
+  unmap_view(pmem, ZAddress::good(offset));
+}
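
map() above materializes the same physical memory at up to three heap views (marked0, marked1, remapped), virtual addresses that differ only in ZGC's colored-pointer metadata bits. A rough standalone illustration of that addressing idea; the shift value is a stand-in, not the real platform constant:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int shift = 42;                 // stand-in metadata bit position
      const uintptr_t offset = 0x1230000;   // offset into the backing "file"

      // Three virtual views of the same offset, one metadata bit apart.
      const uintptr_t marked0  = offset | (uintptr_t(1) << shift);
      const uintptr_t marked1  = offset | (uintptr_t(1) << (shift + 1));
      const uintptr_t remapped = offset | (uintptr_t(1) << (shift + 2));

      printf("marked0  %#lx\nmarked1  %#lx\nremapped %#lx\n",
             (unsigned long)marked0, (unsigned long)marked1,
             (unsigned long)remapped);
      return 0;
    }
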
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
+#define OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
+
+#include "gc/z/zBackingFile_bsd.hpp"
+#include "gc/z/zMemory.hpp"
+
+class ZPhysicalMemory;
+
+class ZPhysicalMemoryBacking {
+private:
+  ZBackingFile   _file;
+  ZMemoryManager _committed;
+  ZMemoryManager _uncommitted;
+
+  void pretouch_view(uintptr_t addr, size_t size) const;
+  void map_view(const ZPhysicalMemory& pmem, uintptr_t addr, bool pretouch) const;
+  void unmap_view(const ZPhysicalMemory& pmem, uintptr_t addr) const;
+
+public:
+  bool is_initialized() const;
+
+  void warn_commit_limits(size_t max) const;
+  bool supports_uncommit();
+
+  size_t commit(size_t size);
+  size_t uncommit(size_t size);
+
+  ZPhysicalMemory alloc(size_t size);
+  void free(const ZPhysicalMemory& pmem);
+
+  uintptr_t nmt_address(uintptr_t offset) const;
+
+  void map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+
+  void debug_map(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+  void debug_unmap(const ZPhysicalMemory& pmem, uintptr_t offset) const;
+};
+
+#endif // OS_BSD_GC_Z_ZPHYSICALMEMORYBACKING_BSD_HPP
--- a/src/hotspot/os/bsd/os_bsd.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -2005,6 +2005,10 @@
   return 0;
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  return 0;
+}
+
 bool os::get_page_info(char *start, page_info* info) {
   return false;
 }
@@ -2845,15 +2849,11 @@
     // and if UserSignalHandler is installed all bets are off
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: libjsig is activated, all active signal checking is disabled");
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
         check_signals = false;
       }
     }
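
This hunk, like the matching ones in os_linux.cpp and os_solaris.cpp below, replaces tty output guarded by the PrintJNIResolving flag with Unified Logging on the jni and resolve tags, so the message is now selected at run time with -Xlog, for example:

    $ java -Xlog:jni+resolve=debug -version
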
--- a/src/hotspot/os/linux/os_linux.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/os/linux/os_linux.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -3007,6 +3007,19 @@
   return 0;
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  void** pages = const_cast<void**>(&address);
+  int id = -1;
+
+  if (os::Linux::numa_move_pages(0, 1, pages, NULL, &id, 0) == -1) {
+    return -1;
+  }
+  if (id < 0) {
+    return -1;
+  }
+  return id;
+}
+
 int os::Linux::get_existing_num_nodes() {
   int node;
   int highest_node_number = Linux::numa_max_node();
@@ -3135,6 +3148,8 @@
                                           libnuma_v2_dlsym(handle, "numa_get_membind")));
       set_numa_get_interleave_mask(CAST_TO_FN_PTR(numa_get_interleave_mask_func_t,
                                                   libnuma_v2_dlsym(handle, "numa_get_interleave_mask")));
+      set_numa_move_pages(CAST_TO_FN_PTR(numa_move_pages_func_t,
+                                         libnuma_dlsym(handle, "numa_move_pages")));
 
       if (numa_available() != -1) {
         set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
@@ -3269,6 +3284,7 @@
 os::Linux::numa_distance_func_t os::Linux::_numa_distance;
 os::Linux::numa_get_membind_func_t os::Linux::_numa_get_membind;
 os::Linux::numa_get_interleave_mask_func_t os::Linux::_numa_get_interleave_mask;
+os::Linux::numa_move_pages_func_t os::Linux::_numa_move_pages;
 os::Linux::NumaAllocationPolicy os::Linux::_current_numa_policy;
 unsigned long* os::Linux::_numa_all_nodes;
 struct bitmask* os::Linux::_numa_all_nodes_ptr;
@@ -4789,15 +4805,11 @@
     // Log that signal checking is off only if -verbose:jni is specified.
     if (CheckJNICalls) {
       if (libjsig_is_loaded) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: libjsig is activated, all active signal checking is disabled");
         check_signals = false;
       }
       if (AllowUserSignalHandlers) {
-        if (PrintJNIResolving) {
-          tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
-        }
+        log_debug(jni, resolve)("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
         check_signals = false;
       }
     }
--- a/src/hotspot/os/linux/os_linux.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/os/linux/os_linux.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -216,6 +216,7 @@
   typedef void (*numa_interleave_memory_v2_func_t)(void *start, size_t size, struct bitmask* mask);
   typedef struct bitmask* (*numa_get_membind_func_t)(void);
   typedef struct bitmask* (*numa_get_interleave_mask_func_t)(void);
+  typedef long (*numa_move_pages_func_t)(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags);
 
   typedef void (*numa_set_bind_policy_func_t)(int policy);
   typedef int (*numa_bitmask_isbitset_func_t)(struct bitmask *bmp, unsigned int n);
@@ -234,6 +235,7 @@
   static numa_distance_func_t _numa_distance;
   static numa_get_membind_func_t _numa_get_membind;
   static numa_get_interleave_mask_func_t _numa_get_interleave_mask;
+  static numa_move_pages_func_t _numa_move_pages;
   static unsigned long* _numa_all_nodes;
   static struct bitmask* _numa_all_nodes_ptr;
   static struct bitmask* _numa_nodes_ptr;
@@ -253,6 +255,7 @@
   static void set_numa_distance(numa_distance_func_t func) { _numa_distance = func; }
   static void set_numa_get_membind(numa_get_membind_func_t func) { _numa_get_membind = func; }
   static void set_numa_get_interleave_mask(numa_get_interleave_mask_func_t func) { _numa_get_interleave_mask = func; }
+  static void set_numa_move_pages(numa_move_pages_func_t func) { _numa_move_pages = func; }
   static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   static void set_numa_all_nodes_ptr(struct bitmask **ptr) { _numa_all_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
   static void set_numa_nodes_ptr(struct bitmask **ptr) { _numa_nodes_ptr = (ptr == NULL ? NULL : *ptr); }
@@ -318,6 +321,9 @@
   static int numa_distance(int node1, int node2) {
     return _numa_distance != NULL ? _numa_distance(node1, node2) : -1;
   }
+  static long numa_move_pages(int pid, unsigned long count, void **pages, const int *nodes, int *status, int flags) {
+    return _numa_move_pages != NULL ? _numa_move_pages(pid, count, pages, nodes, status, flags) : -1;
+  }
   static int get_node_by_cpu(int cpu_id);
   static int get_existing_num_nodes();
   // Check if numa node is configured (non-zero memory node).
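
The numa_move_pages binding added above follows this file's optional-binding pattern: the symbol is resolved with dlsym at run time, and the NULL-checked wrapper returns -1 when it is absent so the JVM degrades gracefully without libnuma. A self-contained sketch of that pattern (library name and fallback mirror the code above; error handling trimmed):

    #include <dlfcn.h>
    #include <cstdio>

    typedef long (*move_pages_fn)(int, unsigned long, void**,
                                  const int*, int*, int);

    static move_pages_fn _numa_move_pages = nullptr;

    // NULL-checked wrapper with a harmless fallback, like os::Linux's.
    static long numa_move_pages_or_fail(int pid, unsigned long count,
                                        void** pages, const int* nodes,
                                        int* status, int flags) {
      return _numa_move_pages != nullptr
          ? _numa_move_pages(pid, count, pages, nodes, status, flags)
          : -1;
    }

    int main() {
      void* handle = dlopen("libnuma.so.1", RTLD_LAZY);
      if (handle != nullptr) {
        _numa_move_pages = (move_pages_fn)dlsym(handle, "numa_move_pages");
      }
      printf("numa_move_pages %s\n", _numa_move_pages != nullptr
                 ? "resolved" : "unavailable, wrapper returns -1");
      if (handle != nullptr) dlclose(handle);
      return 0;
    }
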
--- a/src/hotspot/os/solaris/os_solaris.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -2072,7 +2072,7 @@
   char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
   if (res != NULL) {
     if (UseNUMAInterleaving) {
-      numa_make_global(addr, bytes);
+        numa_make_global(addr, bytes);
     }
     return 0;
   }
@@ -2267,6 +2267,10 @@
   return ids[os::random() % r];
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  return 0;
+}
+
 // Request information about the page.
 bool os::get_page_info(char *start, page_info* info) {
   const uint_t info_types[] = { MEMINFO_VLGRP, MEMINFO_VPAGESIZE };
@@ -3684,15 +3688,11 @@
   // Log that signal checking is off only if -verbose:jni is specified.
   if (CheckJNICalls) {
     if (libjsig_is_loaded) {
-      if (PrintJNIResolving) {
-        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
-      }
+      log_debug(jni, resolve)("Info: libjsig is activated, all active signal checking is disabled");
       check_signals = false;
     }
     if (AllowUserSignalHandlers) {
-      if (PrintJNIResolving) {
-        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
-      }
+      log_debug(jni, resolve)("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
       check_signals = false;
     }
   }
--- a/src/hotspot/os/windows/os_windows.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/os/windows/os_windows.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -3447,6 +3447,10 @@
   }
 }
 
+int os::numa_get_group_id_for_address(const void* address) {
+  return 0;
+}
+
 bool os::get_page_info(char *start, page_info* info) {
   return false;
 }
@@ -5688,7 +5692,7 @@
 // up the offset from FS of the thread pointer.
 void os::win32::initialize_thread_ptr_offset() {
   os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
-                           NULL, NULL, NULL, NULL);
+                           NULL, methodHandle(), NULL, NULL);
 }
 
 bool os::supports_map_sync() {
--- a/src/hotspot/share/adlc/output_h.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/adlc/output_h.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -758,10 +758,6 @@
       fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask(uint mask1, uint mask2) : _mask((((uint64_t)mask1) << 32) | mask2) {}\n\n");
       fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask(uint64_t mask) : _mask(mask) {}\n\n");
     }
-    fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask& operator=(const Pipeline_Use_Cycle_Mask &in) {\n");
-    fprintf(fp_hpp, "    _mask = in._mask;\n");
-    fprintf(fp_hpp, "    return *this;\n");
-    fprintf(fp_hpp, "  }\n\n");
     fprintf(fp_hpp, "  bool overlaps(const Pipeline_Use_Cycle_Mask &in2) const {\n");
     fprintf(fp_hpp, "    return ((_mask & in2._mask) != 0);\n");
     fprintf(fp_hpp, "  }\n\n");
@@ -792,11 +788,6 @@
     for (l = 1; l <= masklen; l++)
       fprintf(fp_hpp, "_mask%d(mask%d)%s", l, l, l < masklen ? ", " : " {}\n\n");
 
-    fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask& operator=(const Pipeline_Use_Cycle_Mask &in) {\n");
-    for (l = 1; l <= masklen; l++)
-      fprintf(fp_hpp, "    _mask%d = in._mask%d;\n", l, l);
-    fprintf(fp_hpp, "    return *this;\n");
-    fprintf(fp_hpp, "  }\n\n");
     fprintf(fp_hpp, "  Pipeline_Use_Cycle_Mask intersect(const Pipeline_Use_Cycle_Mask &in2) {\n");
     fprintf(fp_hpp, "    Pipeline_Use_Cycle_Mask out;\n");
     for (l = 1; l <= masklen; l++)
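
Both deleted emissions generated a hand-written operator= that merely copied every field, which is exactly what the implicitly-defined copy assignment already does, so dropping them leaves the generated code's behavior unchanged. A minimal illustration:

    #include <cstdint>
    #include <cstdio>

    // Member-wise copy is what the implicit operator= does for free.
    struct Mask {
      uint64_t _mask;   // no user-declared operator= needed
    };

    int main() {
      Mask a = { 0xff };
      Mask b = { 0 };
      b = a;            // compiler-generated member-wise copy assignment
      printf("0x%llx\n", (unsigned long long)b._mask);
      return 0;
    }
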
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -160,8 +160,6 @@
 }
 
 bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
-  // Make sure the method is not flushed in case of a safepoint in code below.
-  methodHandle the_method(method());
   NoSafepointVerifier nsv;
 
   {
@@ -208,10 +206,7 @@
 bool AOTCompiledMethod::make_entrant() {
   assert(!method()->is_old(), "reviving evolved method!");
 
-  // Make sure the method is not flushed in case of a safepoint in code below.
-  methodHandle the_method(method());
   NoSafepointVerifier nsv;
-
   {
     // Enter critical section.  Does not block for safepoint.
     MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -2590,7 +2590,7 @@
 
 #ifdef ASSERT
   for_each_phi_fun(b, phi,
-                   assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification");
+                   assert(phi->operand_count() != 1 || phi->subst() != phi || phi->is_illegal(), "missed trivial simplification");
   );
 
   ValueStack* state = b->state()->caller_state();
--- a/src/hotspot/share/c1/c1_Optimizer.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/c1/c1_Optimizer.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -366,6 +366,8 @@
           assert(sux_value == end_state->stack_at(index), "stack not equal");
         }
         for_each_local_value(sux_state, index, sux_value) {
+          Phi* sux_phi = sux_value->as_Phi();
+          if (sux_phi != NULL && sux_phi->is_illegal()) continue;
           assert(sux_value == end_state->local_at(index), "locals not equal");
         }
         assert(sux_state->caller_state() == end_state->caller_state(), "caller not equal");
--- a/src/hotspot/share/c1/c1_Runtime1.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1422,7 +1422,7 @@
   assert (nm != NULL, "no more nmethod?");
   nm->make_not_entrant();
 
-  methodHandle m(nm->method());
+  methodHandle m(thread, nm->method());
   MethodData* mdo = m->method_data();
 
   if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
@@ -1443,7 +1443,7 @@
   if (TracePredicateFailedTraps) {
     stringStream ss1, ss2;
     vframeStream vfst(thread);
-    methodHandle inlinee = methodHandle(vfst.method());
+    Method* inlinee = vfst.method();
     inlinee->print_short_name(&ss1);
     m->print_short_name(&ss2);
     tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
--- a/src/hotspot/share/ci/ciEnv.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/ci/ciEnv.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -760,7 +760,7 @@
 
   InstanceKlass* accessor_klass = accessor->get_instanceKlass();
   Klass* holder_klass = holder->get_Klass();
-  methodHandle dest_method;
+  Method* dest_method;
   LinkInfo link_info(holder_klass, name, sig, accessor_klass, LinkInfo::needs_access_check, tag);
   switch (bc) {
   case Bytecodes::_invokestatic:
@@ -782,7 +782,7 @@
   default: ShouldNotReachHere();
   }
 
-  return dest_method();
+  return dest_method;
 }
 
 
--- a/src/hotspot/share/ci/ciExceptionHandler.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/ci/ciExceptionHandler.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
   if (_catch_klass == NULL) {
     bool will_link;
     assert(_loading_klass->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
-    constantPoolHandle cpool(_loading_klass->get_instanceKlass()->constants());
+    constantPoolHandle cpool(THREAD, _loading_klass->get_instanceKlass()->constants());
     ciKlass* k = CURRENT_ENV->get_klass_by_index(cpool,
                                                  _catch_klass_index,
                                                  will_link,
--- a/src/hotspot/share/ci/ciField.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/ci/ciField.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -391,7 +391,7 @@
 
   LinkInfo link_info(_holder->get_instanceKlass(),
                      _name->get_symbol(), _signature->get_symbol(),
-                     accessing_method->get_Method());
+                     methodHandle(THREAD, accessing_method->get_Method()));
   fieldDescriptor result;
   LinkResolver::resolve_field(result, link_info, bc, false, KILL_COMPILE_ON_FATAL_(false));
 
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -32,7 +32,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
--- a/src/hotspot/share/ci/ciMethod.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/ci/ciMethod.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -72,25 +72,25 @@
   assert(h_m() != NULL, "no null method");
 
   if (LogTouchedMethods) {
-    h_m()->log_touched(Thread::current());
+    h_m->log_touched(Thread::current());
   }
   // These fields are always filled in in loaded methods.
-  _flags = ciFlags(h_m()->access_flags());
+  _flags = ciFlags(h_m->access_flags());
 
   // Easy to compute, so fill them in now.
-  _max_stack          = h_m()->max_stack();
-  _max_locals         = h_m()->max_locals();
-  _code_size          = h_m()->code_size();
-  _intrinsic_id       = h_m()->intrinsic_id();
-  _handler_count      = h_m()->exception_table_length();
-  _size_of_parameters = h_m()->size_of_parameters();
-  _uses_monitors      = h_m()->access_flags().has_monitor_bytecodes();
-  _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
-  _is_c1_compilable   = !h_m()->is_not_c1_compilable();
-  _is_c2_compilable   = !h_m()->is_not_c2_compilable();
+  _max_stack          = h_m->max_stack();
+  _max_locals         = h_m->max_locals();
+  _code_size          = h_m->code_size();
+  _intrinsic_id       = h_m->intrinsic_id();
+  _handler_count      = h_m->exception_table_length();
+  _size_of_parameters = h_m->size_of_parameters();
+  _uses_monitors      = h_m->access_flags().has_monitor_bytecodes();
+  _balanced_monitors  = !_uses_monitors || h_m->access_flags().is_monitor_matching();
+  _is_c1_compilable   = !h_m->is_not_c1_compilable();
+  _is_c2_compilable   = !h_m->is_not_c2_compilable();
   _can_be_parsed      = true;
-  _has_reserved_stack_access = h_m()->has_reserved_stack_access();
-  _is_overpass        = h_m()->is_overpass();
+  _has_reserved_stack_access = h_m->has_reserved_stack_access();
+  _is_overpass        = h_m->is_overpass();
   // Lazy fields, filled in on demand.  Require allocation.
   _code               = NULL;
   _exception_handlers = NULL;
@@ -114,8 +114,8 @@
     DEBUG_ONLY(CompilerThread::current()->check_possible_safepoint());
   }
 
-  if (h_m()->method_holder()->is_linked()) {
-    _can_be_statically_bound = h_m()->can_be_statically_bound();
+  if (h_m->method_holder()->is_linked()) {
+    _can_be_statically_bound = h_m->can_be_statically_bound();
   } else {
     // Have to use a conservative value in this case.
     _can_be_statically_bound = false;
@@ -123,25 +123,25 @@
 
   // Adjust the definition of this condition to be more useful:
   // %%% take these conditions into account in vtable generation
-  if (!_can_be_statically_bound && h_m()->is_private())
+  if (!_can_be_statically_bound && h_m->is_private())
     _can_be_statically_bound = true;
-  if (_can_be_statically_bound && h_m()->is_abstract())
+  if (_can_be_statically_bound && h_m->is_abstract())
     _can_be_statically_bound = false;
 
   // generating _signature may allow GC and therefore move m.
   // These fields are always filled in.
-  _name = env->get_symbol(h_m()->name());
-  ciSymbol* sig_symbol = env->get_symbol(h_m()->signature());
-  constantPoolHandle cpool = h_m()->constants();
+  _name = env->get_symbol(h_m->name());
+  ciSymbol* sig_symbol = env->get_symbol(h_m->signature());
+  constantPoolHandle cpool(Thread::current(), h_m->constants());
   _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol);
   _method_data = NULL;
-  _nmethod_age = h_m()->nmethod_age();
+  _nmethod_age = h_m->nmethod_age();
   // Take a snapshot of these values, so they will be commensurate with the MDO.
   if (ProfileInterpreter || TieredCompilation) {
-    int invcnt = h_m()->interpreter_invocation_count();
+    int invcnt = h_m->interpreter_invocation_count();
     // if the value overflowed report it as max int
     _interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ;
-    _interpreter_throwout_count   = h_m()->interpreter_throwout_count();
+    _interpreter_throwout_count   = h_m->interpreter_throwout_count();
   } else {
     _interpreter_invocation_count = 0;
     _interpreter_throwout_count = 0;
@@ -431,7 +431,7 @@
 ResourceBitMap ciMethod::live_local_oops_at_bci(int bci) {
   VM_ENTRY_MARK;
   InterpreterOopMap mask;
-  OopMapCache::compute_one_oop_map(get_Method(), bci, &mask);
+  OopMapCache::compute_one_oop_map(methodHandle(THREAD, get_Method()), bci, &mask);
   int mask_size = max_locals();
   ResourceBitMap result(mask_size);
   int i;
@@ -749,8 +749,8 @@
   {
     MutexLocker locker(Compile_lock);
     Klass* context = actual_recv->get_Klass();
-    target = Dependencies::find_unique_concrete_method(context,
-                                                       root_m->get_Method());
+    target = methodHandle(THREAD, Dependencies::find_unique_concrete_method(context,
+                                                       root_m->get_Method()));
     // %%% Should upgrade this ciMethod API to look for 1 or 2 concrete methods.
   }
 
@@ -810,7 +810,7 @@
 
    LinkInfo link_info(resolved, h_name, h_signature, caller_klass,
                       check_access ? LinkInfo::needs_access_check : LinkInfo::skip_access_check);
-   methodHandle m;
+   Method* m = NULL;
    // Only do exact lookup if receiver klass has been linked.  Otherwise,
    // the vtable has not been setup, and the LinkResolver will fail.
    if (recv->is_array_klass()
@@ -823,14 +823,14 @@
      }
    }
 
-   if (m.is_null()) {
+   if (m == NULL) {
      // Return NULL only if there was a problem with lookup (uninitialized class, etc.)
      return NULL;
    }
 
    ciMethod* result = this;
-   if (m() != get_Method()) {
-     result = CURRENT_THREAD_ENV->get_method(m());
+   if (m != get_Method()) {
+     result = CURRENT_THREAD_ENV->get_method(m);
    }
 
    // Don't return abstract methods because they aren't
@@ -1035,7 +1035,8 @@
   bool result = true;
   if (_method_data == NULL || _method_data->is_empty()) {
     GUARDED_VM_ENTRY({
-      result = ensure_method_data(get_Method());
+      methodHandle mh(Thread::current(), get_Method());
+      result = ensure_method_data(mh);
     });
   }
   return result;
@@ -1268,7 +1269,7 @@
     HandleMark hm(THREAD);
     constantPoolHandle pool (THREAD, get_Method()->constants());
     Bytecodes::Code code = (is_static ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual);
-    methodHandle spec_method = LinkResolver::resolve_method_statically(code, pool, refinfo_index, THREAD);
+    Method* spec_method = LinkResolver::resolve_method_statically(code, pool, refinfo_index, THREAD);
     if (HAS_PENDING_EXCEPTION) {
       CLEAR_PENDING_EXCEPTION;
       return false;
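
A recurring pattern in the ci/ and classfile/ hunks: methodHandle and constantPoolHandle construction now takes the current thread explicitly, while bare Method* is used where no safepoint can intervene. A toy sketch of why a handle wants the owning thread; every type here is an illustrative stand-in, not HotSpot's real Handle machinery:

    #include <cstdio>

    struct Method { const char* name; };

    // Toy model: the handle registers a slot with the owning thread so a
    // moving collector could update it, hence the THREAD argument.
    struct Thread {
      Method* handle_area[16];
      int top;
    };

    struct methodHandle {
      Method** _slot;
      methodHandle(Thread* thread, Method* m) {
        thread->handle_area[thread->top] = m;    // slot the GC may rewrite
        _slot = &thread->handle_area[thread->top++];
      }
      Method* operator()() const { return *_slot; }
    };

    int main() {
      Thread t = {};
      Method m = { "foo" };
      methodHandle h(&t, &m);
      printf("handle resolves to %s\n", h()->name);
      return 0;
    }
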
--- a/src/hotspot/share/ci/ciReplay.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/ci/ciReplay.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -597,7 +597,7 @@
       nm->make_not_entrant();
     }
     replay_state = this;
-    CompileBroker::compile_method(method, entry_bci, comp_level,
+    CompileBroker::compile_method(methodHandle(THREAD, method), entry_bci, comp_level,
                                   methodHandle(), 0, CompileTask::Reason_Replay, THREAD);
     replay_state = NULL;
     reset();
@@ -634,7 +634,7 @@
       MutexLocker ml(MethodData_lock, THREAD);
       if (method->method_data() == NULL) {
         ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
-        MethodData* method_data = MethodData::allocate(loader_data, method, CHECK);
+        MethodData* method_data = MethodData::allocate(loader_data, methodHandle(THREAD, method), CHECK);
         method->set_method_data(method_data);
       }
     }
--- a/src/hotspot/share/ci/ciStreams.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/ci/ciStreams.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -186,7 +186,7 @@
 // or checkcast, get the referenced klass.
 ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   return CURRENT_ENV->get_klass_by_index(cpool, get_klass_index(), will_link, _holder);
 }
 
@@ -217,7 +217,7 @@
   int index = get_constant_raw_index();
   if (has_cache_index()) {
     VM_ENTRY_MARK;
-    constantPoolHandle cpool(_method->get_Method()->constants());
+    constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
     return cpool->object_to_cp_index(index);
   }
   return index;
@@ -236,7 +236,7 @@
     pool_index = -1;
   }
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   return CURRENT_ENV->get_constant_by_index(cpool, pool_index, cache_index, _holder);
 }
 
@@ -289,7 +289,7 @@
 // for checking linkability when retrieving the associated field.
 ciInstanceKlass* ciBytecodeStream::get_declared_field_holder() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   int holder_index = get_field_holder_index();
   bool ignore;
   return CURRENT_ENV->get_klass_by_index(cpool, holder_index, ignore, _holder)
@@ -431,7 +431,7 @@
 // constant pool cache at the current bci.
 bool ciBytecodeStream::has_appendix() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   return ConstantPool::has_appendix_at_if_loaded(cpool, get_method_index());
 }
 
@@ -442,7 +442,7 @@
 // the current bci.
 ciObject* ciBytecodeStream::get_appendix() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   oop appendix_oop = ConstantPool::appendix_at_if_loaded(cpool, get_method_index());
   return CURRENT_ENV->get_object(appendix_oop);
 }
@@ -454,7 +454,7 @@
 // pool cache at the current bci has a local signature.
 bool ciBytecodeStream::has_local_signature() {
   GUARDED_VM_ENTRY(
-    constantPoolHandle cpool(_method->get_Method()->constants());
+    constantPoolHandle cpool(Thread::current(), _method->get_Method()->constants());
     return ConstantPool::has_local_signature_at_if_loaded(cpool, get_method_index());
   )
 }
@@ -472,7 +472,7 @@
 // for checking linkability when retrieving the associated method.
 ciKlass* ciBytecodeStream::get_declared_method_holder() {
   VM_ENTRY_MARK;
-  constantPoolHandle cpool(_method->get_Method()->constants());
+  constantPoolHandle cpool(THREAD, _method->get_Method()->constants());
   bool ignore;
   // report as MethodHandle for invokedynamic, which is syntactically classless
   if (cur_bc() == Bytecodes::_invokedynamic)
--- a/src/hotspot/share/classfile/bytecodeAssembler.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/bytecodeAssembler.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -54,7 +54,8 @@
       _orig->length() + _entries.length(), CHECK_NULL);
 
   cp->set_pool_holder(_orig->pool_holder());
-  _orig->copy_cp_to(1, _orig->length() - 1, cp, 1, CHECK_NULL);
+  constantPoolHandle cp_h(THREAD, cp);
+  _orig->copy_cp_to(1, _orig->length() - 1, cp_h, 1, CHECK_NULL);
 
   // Preserve dynamic constant information from the original pool
   if (_orig->has_dynamic_constant()) {
--- a/src/hotspot/share/classfile/classFileParser.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -47,7 +47,7 @@
 #include "memory/universe.hpp"
 #include "oops/annotations.hpp"
 #include "oops/constantPool.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -332,7 +332,7 @@
           hashValues[names_count++] = hash;
           if (names_count == SymbolTable::symbol_alloc_batch_size) {
             SymbolTable::new_symbols(_loader_data,
-                                     cp,
+                                     constantPoolHandle(THREAD, cp),
                                      names_count,
                                      names,
                                      lengths,
@@ -369,7 +369,7 @@
   // Allocate the remaining symbols
   if (names_count > 0) {
     SymbolTable::new_symbols(_loader_data,
-                             cp,
+                             constantPoolHandle(THREAD, cp),
                              names_count,
                              names,
                              lengths,
@@ -2870,7 +2870,7 @@
   }
 
   if (parsed_annotations.has_any_annotations())
-    parsed_annotations.apply_to(m);
+    parsed_annotations.apply_to(methodHandle(THREAD, m));
 
   // Copy annotations
   copy_method_annotations(m->constMethod(),
@@ -3753,7 +3753,7 @@
 #ifndef PRODUCT
 static void print_field_layout(const Symbol* name,
                                Array<u2>* fields,
-                               const constantPoolHandle& cp,
+                               ConstantPool* cp,
                                int instance_size,
                                int instance_fields_start,
                                int instance_fields_end,
--- a/src/hotspot/share/classfile/javaClasses.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -42,7 +42,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/klass.hpp"
@@ -1077,7 +1077,7 @@
       Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset));
       assert(ak != NULL || t == T_VOID, "should not be NULL");
       if (ak != NULL) {
-        Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak);
+        Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak, true);
         archived_m->metadata_field_put(_array_klass_offset, reloc_ak);
       }
 
@@ -1222,7 +1222,7 @@
   // The archived mirror's field at _klass_offset is still pointing to the original
   // klass. Updated the field in the archived mirror to point to the relocated
   // klass in the archive.
-  Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror));
+  Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror), true);
   log_debug(cds, heap, mirror)(
     "Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
     p2i(as_Klass(mirror)), p2i(reloc_k));
@@ -1232,7 +1232,7 @@
   // higher array klass if exists. Relocate the pointer.
   Klass *arr = array_klass_acquire(mirror);
   if (arr != NULL) {
-    Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr);
+    Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr, true);
     log_debug(cds, heap, mirror)(
       "Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
       p2i(arr), p2i(reloc_arr));
@@ -1241,6 +1241,33 @@
   return archived_mirror;
 }
 
+void java_lang_Class::update_archived_primitive_mirror_native_pointers(oop archived_mirror) {
+  if (MetaspaceShared::relocation_delta() != 0) {
+    assert(archived_mirror->metadata_field(_klass_offset) == NULL, "must be for primitive class");
+
+    Klass* ak = ((Klass*)archived_mirror->metadata_field(_array_klass_offset));
+    if (ak != NULL) {
+      archived_mirror->metadata_field_put(_array_klass_offset,
+          (Klass*)(address(ak) + MetaspaceShared::relocation_delta()));
+    }
+  }
+}
+
+void java_lang_Class::update_archived_mirror_native_pointers(oop archived_mirror) {
+  if (MetaspaceShared::relocation_delta() != 0) {
+    Klass* k = ((Klass*)archived_mirror->metadata_field(_klass_offset));
+    archived_mirror->metadata_field_put(_klass_offset,
+        (Klass*)(address(k) + MetaspaceShared::relocation_delta()));
+
+    Klass* ak = ((Klass*)archived_mirror->metadata_field(_array_klass_offset));
+    if (ak != NULL) {
+      archived_mirror->metadata_field_put(_array_klass_offset,
+          (Klass*)(address(ak) + MetaspaceShared::relocation_delta()));
+    }
+  }
+}
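
update_archived_mirror_native_pointers() applies MetaspaceShared::relocation_delta() to the raw Klass* fields stored in an archived mirror, the usual slide-style fixup when a CDS archive maps somewhere other than its requested base. A toy illustration with made-up addresses:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Dump-time and run-time base of the archived region (made up).
      const uintptr_t requested_base = 0x0000000800000000;
      const uintptr_t actual_base    = 0x0000000810000000;
      const intptr_t  delta = (intptr_t)(actual_base - requested_base);

      // A raw Klass* recorded at dump time, slid by the delta at run time.
      const uintptr_t archived_klass = requested_base + 0x1230;
      const uintptr_t runtime_klass  = archived_klass + delta;

      printf("fixed up %#lx -> %#lx (delta %#lx)\n",
             (unsigned long)archived_klass, (unsigned long)runtime_klass,
             (unsigned long)delta);
      return 0;
    }
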
+
+
 // Returns true if the mirror is updated, false if no archived mirror
 // data is present. After the archived mirror object is restored, the
 // shared klass' _has_raw_archived_mirror flag is cleared.
@@ -1256,15 +1283,15 @@
   }
 
   oop m = HeapShared::materialize_archived_object(k->archived_java_mirror_raw_narrow());
-
   if (m == NULL) {
     return false;
   }
 
+  // mirror is archived, restore
   log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m));
-
-  // mirror is archived, restore
   assert(HeapShared::is_archived_object(m), "must be archived mirror object");
+  update_archived_mirror_native_pointers(m);
+  assert(as_Klass(m) == k, "must be");
   Handle mirror(THREAD, m);
 
   if (!k->is_array_klass()) {
@@ -2270,7 +2297,7 @@
   st->print_cr("%s", buf);
 }
 
-void java_lang_Throwable::print_stack_element(outputStream *st, const methodHandle& method, int bci) {
+void java_lang_Throwable::print_stack_element(outputStream *st, Method* method, int bci) {
   Handle mirror (Thread::current(),  method->method_holder()->java_mirror());
   int method_id = method->orig_method_idnum();
   int version = method->constants()->version();
@@ -2376,7 +2403,6 @@
   // trace as utilizing vframe.
 #ifdef ASSERT
   vframeStream st(thread);
-  methodHandle st_method(THREAD, st.method());
 #endif
   int total_count = 0;
   RegisterMap map(thread, false);
@@ -2426,14 +2452,9 @@
       }
     }
 #ifdef ASSERT
-    assert(st_method() == method && st.bci() == bci,
+    assert(st.method() == method && st.bci() == bci,
            "Wrong stack trace");
     st.next();
-    // vframeStream::method isn't GC-safe so store off a copy
-    // of the Method* in case we GC.
-    if (!st.at_end()) {
-      st_method = st.method();
-    }
 #endif
 
     // the format of the stacktrace will be:
@@ -2696,7 +2717,7 @@
     }
     java_lang_StackTraceElement::set_fileName(element(), source_file);
 
-    int line_number = Backtrace::get_line_number(method, bci);
+    int line_number = Backtrace::get_line_number(method(), bci);
     java_lang_StackTraceElement::set_lineNumber(element(), line_number);
   }
 }
@@ -2771,7 +2792,8 @@
   short version = stackFrame->short_field(_version_offset);
   int bci = stackFrame->int_field(_bci_offset);
   Symbol* name = method->name();
-  java_lang_StackTraceElement::fill_in(stack_trace_element, holder, method, version, bci, name, CHECK);
+  java_lang_StackTraceElement::fill_in(stack_trace_element, holder, methodHandle(THREAD, method),
+                                       version, bci, name, CHECK);
 }
 
 #define STACKFRAMEINFO_FIELDS_DO(macro) \
@@ -4654,6 +4676,28 @@
 }
 #endif
 
+#if INCLUDE_CDS_JAVA_HEAP
+bool JavaClasses::is_supported_for_archiving(oop obj) {
+  Klass* klass = obj->klass();
+
+  if (klass == SystemDictionary::ClassLoader_klass() ||  // ClassLoader::loader_data is malloc'ed.
+      klass == SystemDictionary::Module_klass() ||       // Module::module_entry is malloc'ed
+      // The next 3 classes are used to implement java.lang.invoke, and are not used directly in
+      // regular Java code. The implementation of java.lang.invoke uses generated anonymous classes
+      // (e.g., as referenced by ResolvedMethodName::vmholder) that are not yet supported by CDS.
+      // So for now we cannot support these classes for archiving.
+      //
+      // These objects typically are not referenced by static fields, but rather by resolved
+      // constant pool entries, so excluding them shouldn't affect the archiving of static fields.
+      klass == SystemDictionary::ResolvedMethodName_klass() ||
+      klass == SystemDictionary::MemberName_klass() ||
+      klass == SystemDictionary::Context_klass()) {
+    return false;
+  }
+
+  return true;
+}
+#endif
 
 #ifndef PRODUCT
 
--- a/src/hotspot/share/classfile/javaClasses.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -281,6 +281,8 @@
                             Handle protection_domain, TRAPS);
   static void fixup_mirror(Klass* k, TRAPS);
   static oop  create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
+  static void update_archived_primitive_mirror_native_pointers(oop archived_mirror) NOT_CDS_JAVA_HEAP_RETURN;
+  static void update_archived_mirror_native_pointers(oop archived_mirror) NOT_CDS_JAVA_HEAP_RETURN;
 
   // Archiving
   static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
@@ -561,7 +563,7 @@
   static oop message(oop throwable);
   static void set_message(oop throwable, oop value);
   static Symbol* detail_message(oop throwable);
-  static void print_stack_element(outputStream *st, const methodHandle& method, int bci);
+  static void print_stack_element(outputStream *st, Method* method, int bci);
   static void print_stack_usage(Handle stream);
 
   static void compute_offsets();
@@ -1402,7 +1404,7 @@
   static int version_at(unsigned int merged);
   static int mid_at(unsigned int merged);
   static int cpref_at(unsigned int merged);
-  static int get_line_number(const methodHandle& method, int bci);
+  static int get_line_number(Method* method, int bci);
   static Symbol* get_source_file_name(InstanceKlass* holder, int version);
 
   // Debugging
@@ -1662,6 +1664,7 @@
   static void check_offsets() PRODUCT_RETURN;
   static void serialize_offsets(SerializeClosure* soc) NOT_CDS_RETURN;
   static InjectedField* get_injected(Symbol* class_name, int* field_count);
+  static bool is_supported_for_archiving(oop obj) NOT_CDS_JAVA_HEAP_RETURN_(false);
 };
 
 #undef DECLARE_INJECTED_FIELD_ENUM
--- a/src/hotspot/share/classfile/javaClasses.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/javaClasses.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -264,7 +264,7 @@
   return extract_low_short_from_int(merged);
 }
 
-inline int Backtrace::get_line_number(const methodHandle& method, int bci) {
+inline int Backtrace::get_line_number(Method* method, int bci) {
   int line_number = 0;
   if (method->is_native()) {
     // Negative value different from -1 below, enabling Java code in
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -2338,9 +2338,9 @@
 }
 
 
-methodHandle SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid,
-                                                            Symbol* signature,
-                                                            TRAPS) {
+Method* SystemDictionary::find_method_handle_intrinsic(vmIntrinsics::ID iid,
+                                                       Symbol* signature,
+                                                       TRAPS) {
   methodHandle empty;
   assert(MethodHandles::is_signature_polymorphic(iid) &&
          MethodHandles::is_signature_polymorphic_intrinsic(iid) &&
@@ -2354,14 +2354,14 @@
   if (spe == NULL || spe->method() == NULL) {
     spe = NULL;
     // Must create lots of stuff here, but outside of the SystemDictionary lock.
-    m = Method::make_method_handle_intrinsic(iid, signature, CHECK_(empty));
+    m = Method::make_method_handle_intrinsic(iid, signature, CHECK_NULL);
     if (!Arguments::is_interpreter_only()) {
       // Generate a compiled form of the MH intrinsic.
       AdapterHandlerLibrary::create_native_wrapper(m);
       // Check if have the compiled code.
       if (!m->has_compiled_code()) {
-        THROW_MSG_(vmSymbols::java_lang_VirtualMachineError(),
-                   "Out of space in CodeCache for method handle intrinsic", empty);
+        THROW_MSG_NULL(vmSymbols::java_lang_VirtualMachineError(),
+                       "Out of space in CodeCache for method handle intrinsic");
       }
     }
     // Now grab the lock.  We might have to throw away the new method,
@@ -2384,12 +2384,11 @@
 }
 
 // Helper for unpacking the return value from linkMethod and linkCallSite.
-static methodHandle unpack_method_and_appendix(Handle mname,
-                                               Klass* accessing_klass,
-                                               objArrayHandle appendix_box,
-                                               Handle* appendix_result,
-                                               TRAPS) {
-  methodHandle empty;
+static Method* unpack_method_and_appendix(Handle mname,
+                                          Klass* accessing_klass,
+                                          objArrayHandle appendix_box,
+                                          Handle* appendix_result,
+                                          TRAPS) {
   if (mname.not_null()) {
     Method* m = java_lang_invoke_MemberName::vmtarget(mname());
     if (m != NULL) {
@@ -2407,35 +2406,34 @@
       // the target is stored in the cpCache and if a reference to this
       // MemberName is dropped we need a way to make sure the
       // class_loader containing this method is kept alive.
+      methodHandle mh(THREAD, m); // record_dependency can safepoint.
       ClassLoaderData* this_key = accessing_klass->class_loader_data();
       this_key->record_dependency(m->method_holder());
-      return methodHandle(THREAD, m);
+      return mh();
     }
   }
-  THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad value from MethodHandleNatives", empty);
-  return empty;
+  THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "bad value from MethodHandleNatives");
 }
 
-methodHandle SystemDictionary::find_method_handle_invoker(Klass* klass,
-                                                          Symbol* name,
-                                                          Symbol* signature,
-                                                          Klass* accessing_klass,
-                                                          Handle *appendix_result,
-                                                          TRAPS) {
-  methodHandle empty;
+Method* SystemDictionary::find_method_handle_invoker(Klass* klass,
+                                                     Symbol* name,
+                                                     Symbol* signature,
+                                                     Klass* accessing_klass,
+                                                     Handle *appendix_result,
+                                                     TRAPS) {
   assert(THREAD->can_call_java() ,"");
   Handle method_type =
-    SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_(empty));
+    SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_NULL);
 
   int ref_kind = JVM_REF_invokeVirtual;
-  oop name_oop = StringTable::intern(name, CHECK_(empty));
+  oop name_oop = StringTable::intern(name, CHECK_NULL);
   Handle name_str (THREAD, name_oop);
-  objArrayHandle appendix_box = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), 1, CHECK_(empty));
+  objArrayHandle appendix_box = oopFactory::new_objArray_handle(SystemDictionary::Object_klass(), 1, CHECK_NULL);
   assert(appendix_box->obj_at(0) == NULL, "");
 
   // This should not happen.  JDK code should take care of that.
   if (accessing_klass == NULL || method_type.is_null()) {
-    THROW_MSG_(vmSymbols::java_lang_InternalError(), "bad invokehandle", empty);
+    THROW_MSG_NULL(vmSymbols::java_lang_InternalError(), "bad invokehandle");
   }
 
   // call java.lang.invoke.MethodHandleNatives::linkMethod(... String, MethodType) -> MemberName
@@ -2451,7 +2449,7 @@
                          SystemDictionary::MethodHandleNatives_klass(),
                          vmSymbols::linkMethod_name(),
                          vmSymbols::linkMethod_signature(),
-                         &args, CHECK_(empty));
+                         &args, CHECK_NULL);
   Handle mname(THREAD, (oop) result.get_jobject());
   return unpack_method_and_appendix(mname, accessing_klass, appendix_box, appendix_result, THREAD);
 }
@@ -2755,11 +2753,12 @@
   Handle value(THREAD, (oop) result.get_jobject());
   if (is_indy) {
     Handle appendix;
-    methodHandle method = unpack_method_and_appendix(value,
-                                                     bootstrap_specifier.caller(),
-                                                     appendix_box,
-                                                     &appendix, CHECK);
-    bootstrap_specifier.set_resolved_method(method, appendix);
+    Method* method = unpack_method_and_appendix(value,
+                                                bootstrap_specifier.caller(),
+                                                appendix_box,
+                                                &appendix, CHECK);
+    methodHandle mh(THREAD, method);
+    bootstrap_specifier.set_resolved_method(mh, appendix);
   } else {
     bootstrap_specifier.set_resolved_value(value);
   }
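
The hunks above show the pattern repeated throughout this merge: functions that used to return a methodHandle (and needed a dummy empty handle just to feed the exception macros) now return a raw Method*, use the NULL-returning macro variants, and let the caller wrap the pointer in a thread-bound methodHandle before any operation that can safepoint. A minimal before/after distillation using HotSpot's TRAPS/CHECK macros; find_target is a hypothetical callee, shown for illustration only:

    // Before: the methodHandle return type forces a dummy result object.
    methodHandle f(TRAPS) {
      methodHandle empty;
      Method* m = find_target(CHECK_(empty));  // propagate exceptions via dummy
      if (m == NULL) {
        THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad value", empty);
      }
      return methodHandle(THREAD, m);
    }

    // After: a Method* return type uses the NULL-returning variants directly.
    Method* f(TRAPS) {
      Method* m = find_target(CHECK_NULL);
      if (m == NULL) {
        THROW_MSG_NULL(vmSymbols::java_lang_LinkageError(), "bad value");
      }
      return m;
    }

    // Caller side: build the handle explicitly, which keeps the holder
    // class alive across subsequent safepoints.
    Method* m = f(CHECK);
    methodHandle mh(THREAD, m);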
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -464,17 +464,17 @@
   // JSR 292
   // find a java.lang.invoke.MethodHandle.invoke* method for a given signature
   // (asks Java to compute it if necessary, except in a compiler thread)
-  static methodHandle find_method_handle_invoker(Klass* klass,
-                                                 Symbol* name,
-                                                 Symbol* signature,
-                                                 Klass* accessing_klass,
-                                                 Handle *appendix_result,
-                                                 TRAPS);
+  static Method* find_method_handle_invoker(Klass* klass,
+                                            Symbol* name,
+                                            Symbol* signature,
+                                            Klass* accessing_klass,
+                                            Handle *appendix_result,
+                                            TRAPS);
   // for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic)
   // (does not ask Java, since this is a low-level intrinsic defined by the JVM)
-  static methodHandle find_method_handle_intrinsic(vmIntrinsics::ID iid,
-                                                   Symbol* signature,
-                                                   TRAPS);
+  static Method* find_method_handle_intrinsic(vmIntrinsics::ID iid,
+                                              Symbol* signature,
+                                              TRAPS);
 
   // compute java_mirror (java.lang.Class instance) for a type ("I", "[[B", "LFoo;", etc.)
   // Either the accessing_klass or the CL/PD can be non-null, but not both.
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -38,6 +38,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
+#include "memory/archiveUtils.hpp"
 #include "memory/filemap.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceClosure.hpp"
@@ -294,6 +295,7 @@
     if (DynamicDumpSharedSpaces) {
       _klass = DynamicArchive::original_to_target(info._klass);
     }
+    ArchivePtrMarker::mark_pointer(&_klass);
   }
 
   bool matches(int clsfile_size, int clsfile_crc32) const {
@@ -337,6 +339,8 @@
     } else {
       *info_pointer_addr(klass) = record;
     }
+
+    ArchivePtrMarker::mark_pointer(info_pointer_addr(klass));
   }
 
   // Used by RunTimeSharedDictionary to implement OffsetCompactHashtable::EQUALS
@@ -1354,7 +1358,7 @@
       if (DynamicDumpSharedSpaces) {
         name = DynamicArchive::original_to_target(name);
       }
-      hash = primitive_hash<Symbol*>(name);
+      hash = SystemDictionaryShared::hash_for_shared_dictionary(name);
       u4 delta;
       if (DynamicDumpSharedSpaces) {
         delta = MetaspaceShared::object_delta_u4(DynamicArchive::buffer_to_target(record));
@@ -1413,7 +1417,7 @@
     return NULL;
   }
 
-  unsigned int hash = primitive_hash<Symbol*>(name);
+  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(name);
   const RunTimeSharedClassInfo* record = NULL;
   if (!MetaspaceShared::is_shared_dynamic(name)) {
     // The names of all shared classes in the static dict must also be in the
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -323,6 +323,12 @@
   };
 #endif
 
+  template <typename T>
+  static unsigned int hash_for_shared_dictionary(T* ptr) {
+    assert(ptr > (T*)SharedBaseAddress, "must be");
+    address p = address(ptr) - SharedBaseAddress;
+    return primitive_hash<address>(p);
+  }
 };
 
 #endif // SHARE_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
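
For context on the hash change above: hashing the raw Symbol* value (primitive_hash<Symbol*>) produces different hash codes at dump time and run time whenever the shared archive is mapped at a different base address, while hashing the offset from SharedBaseAddress is relocation-invariant. A standalone sketch of the idea; the multiplicative constant stands in for HotSpot's primitive_hash and is an assumption of this illustration, not the real function:

    #include <cstdint>
    #include <cstdio>

    // Hash the position of p inside an archive that starts at base.
    // Two mappings of the same archive at different bases agree on this value.
    static unsigned int hash_for_shared_dictionary(const void* p, uintptr_t base) {
      uintptr_t offset = reinterpret_cast<uintptr_t>(p) - base;
      return static_cast<unsigned int>(offset * 2654435761u); // multiplicative hash
    }

    int main() {
      unsigned char archive_a[64], archive_b[64]; // same blob, two different bases
      unsigned int ha = hash_for_shared_dictionary(archive_a + 16,
                            reinterpret_cast<uintptr_t>(archive_a));
      unsigned int hb = hash_for_shared_dictionary(archive_b + 16,
                            reinterpret_cast<uintptr_t>(archive_b));
      std::printf("%s\n", ha == hb ? "stable across relocation" : "unstable");
      return 0;
    }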
--- a/src/hotspot/share/classfile/vmSymbols.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/classfile/vmSymbols.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1069,19 +1069,18 @@
 
   const char* declared_name = name_at(declared_id);
   const char* actual_name   = name_at(actual_id);
-  methodHandle mh = m;
   m = NULL;
   ttyLocker ttyl;
   if (xtty != NULL) {
     xtty->begin_elem("intrinsic_misdeclared actual='%s' declared='%s'",
                      actual_name, declared_name);
-    xtty->method(mh);
+    xtty->method(m);
     xtty->end_elem("%s", "");
   }
   if (PrintMiscellaneous && (WizardMode || Verbose)) {
     tty->print_cr("*** misidentified method; %s(%d) should be %s(%d):",
                   declared_name, declared_id, actual_name, actual_id);
-    mh()->print_short_name(tty);
+    m->print_short_name(tty);
     tty->cr();
   }
 }
--- a/src/hotspot/share/code/compiledIC.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/code/compiledIC.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -287,7 +287,7 @@
 
   if (TraceICs) {
     ResourceMark rm;
-    assert(!call_info->selected_method().is_null(), "Unexpected null selected method");
+    assert(call_info->selected_method() != NULL, "Unexpected null selected method");
     tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                    p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
   }
--- a/src/hotspot/share/code/compiledMethod.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/code/compiledMethod.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -355,7 +355,7 @@
   if (method() != NULL && !method()->is_native()) {
     address pc = fr.pc();
     SimpleScopeDesc ssd(this, pc);
-    Bytecode_invoke call(ssd.method(), ssd.bci());
+    Bytecode_invoke call(methodHandle(Thread::current(), ssd.method()), ssd.bci());
     bool has_receiver = call.has_receiver();
     bool has_appendix = call.has_appendix();
     Symbol* signature = call.signature();
--- a/src/hotspot/share/code/nmethod.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/code/nmethod.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -964,15 +964,16 @@
 
 #if defined(SUPPORT_DATA_STRUCTS)
   if (AbstractDisassembler::show_structs()) {
-    if (printmethod || PrintDebugInfo || CompilerOracle::has_option_string(_method, "PrintDebugInfo")) {
+    methodHandle mh(Thread::current(), _method);
+    if (printmethod || PrintDebugInfo || CompilerOracle::has_option_string(mh, "PrintDebugInfo")) {
       print_scopes();
       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
     }
-    if (printmethod || PrintRelocations || CompilerOracle::has_option_string(_method, "PrintRelocations")) {
+    if (printmethod || PrintRelocations || CompilerOracle::has_option_string(mh, "PrintRelocations")) {
       print_relocations();
       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
     }
-    if (printmethod || PrintDependencies || CompilerOracle::has_option_string(_method, "PrintDependencies")) {
+    if (printmethod || PrintDependencies || CompilerOracle::has_option_string(mh, "PrintDependencies")) {
       print_dependencies();
       tty->print_cr("- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ");
     }
@@ -1302,9 +1303,8 @@
     return false;
   }
 
-  // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
+  // Make sure the nmethod is not flushed.
   nmethodLocker nml(this);
-  methodHandle the_method(method());
   // This can be called while the system is already at a safepoint which is ok
   NoSafepointVerifier nsv;
 
@@ -3079,13 +3079,13 @@
   }
 
   if (block_begin == entry_point()) {
-    methodHandle m = method();
-    if (m.not_null()) {
+    Method* m = method();
+    if (m != NULL) {
       stream->print("  # ");
       m->print_value_on(stream);
       stream->cr();
     }
-    if (m.not_null() && !is_osr_method()) {
+    if (m != NULL && !is_osr_method()) {
       ResourceMark rm;
       int sizeargs = m->size_of_parameters();
       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
@@ -3237,6 +3237,8 @@
   }
   assert(!oop_map_required, "missed oopmap");
 
+  Thread* thread = Thread::current();
+
   // Print any debug info present at this pc.
   ScopeDesc* sd  = scope_desc_in(begin, end);
   if (sd != NULL) {
@@ -3267,7 +3269,7 @@
         case Bytecodes::_invokestatic:
         case Bytecodes::_invokeinterface:
           {
-            Bytecode_invoke invoke(sd->method(), sd->bci());
+            Bytecode_invoke invoke(methodHandle(thread, sd->method()), sd->bci());
             st->print(" ");
             if (invoke.name() != NULL)
               invoke.name()->print_symbol_on(st);
@@ -3280,7 +3282,7 @@
         case Bytecodes::_getstatic:
         case Bytecodes::_putstatic:
           {
-            Bytecode_field field(sd->method(), sd->bci());
+            Bytecode_field field(methodHandle(thread, sd->method()), sd->bci());
             st->print(" ");
             if (field.name() != NULL)
               field.name()->print_symbol_on(st);
@@ -3356,7 +3358,7 @@
       if (cm != NULL && cm->is_far_code()) {
         // Temporary fix, see JDK-8143106
         CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address());
-        csc->set_to_far(methodHandle(cm->method()), dest);
+        csc->set_to_far(methodHandle(Thread::current(), cm->method()), dest);
         return;
       }
     }
--- a/src/hotspot/share/compiler/compileBroker.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/compiler/compileBroker.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -299,7 +299,7 @@
 /**
  * Check if a CompilerThread can be removed and update count if requested.
  */
-static bool can_remove(CompilerThread *ct, bool do_it) {
+bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
   assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
   if (!ReduceNumberOfCompilerThreads) return false;
 
@@ -313,13 +313,32 @@
   // Keep thread alive for at least some time.
   if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;
 
+#if INCLUDE_JVMCI
+  if (compiler->is_jvmci()) {
+    // Handles for JVMCI thread objects may get released concurrently.
+    if (do_it) {
+      assert(CompileThread_lock->owner() == ct, "must be holding lock");
+    } else {
+      // Skip check if it's the last thread and let caller check again.
+      return true;
+    }
+  }
+#endif
+
   // We only allow the last compiler thread of each type to get removed.
-  jobject last_compiler = c1 ? CompileBroker::compiler1_object(compiler_count - 1)
-                             : CompileBroker::compiler2_object(compiler_count - 1);
+  jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
+                             : compiler2_object(compiler_count - 1);
   if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
     if (do_it) {
       assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
       compiler->set_num_compiler_threads(compiler_count - 1);
+#if INCLUDE_JVMCI
+      if (compiler->is_jvmci()) {
+        // Old j.l.Thread object can die when no longer referenced elsewhere.
+        JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
+        _compiler2_objects[compiler_count - 1] = NULL;
+      }
+#endif
     }
     return true;
   }
@@ -426,7 +445,7 @@
 
     if (UseDynamicNumberOfCompilerThreads && _first == NULL) {
       // Still nothing to compile. Give caller a chance to stop this thread.
-      if (can_remove(CompilerThread::current(), false)) return NULL;
+      if (CompileBroker::can_remove(CompilerThread::current(), false)) return NULL;
     }
   }
 
@@ -446,8 +465,9 @@
   if (task != NULL) {
     // Save method pointers across unlock safepoint.  The task is removed from
     // the compilation queue, which is walked during RedefineClasses.
-    save_method = methodHandle(task->method());
-    save_hot_method = methodHandle(task->hot_method());
+    Thread* thread = Thread::current();
+    save_method = methodHandle(thread, task->method());
+    save_hot_method = methodHandle(thread, task->hot_method());
 
     remove(task);
   }
@@ -842,10 +862,15 @@
   char name_buffer[256];
 
   for (int i = 0; i < _c2_count; i++) {
+    jobject thread_handle = NULL;
+    // Create all j.l.Thread objects for C1 and C2 threads here, but only one
+    // for JVMCI compiler which can create further ones on demand.
+    JVMCI_ONLY(if (!UseJVMCICompiler || !UseDynamicNumberOfCompilerThreads || i == 0) {)
     // Create a name for our thread.
     sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
     Handle thread_oop = create_thread_oop(name_buffer, CHECK);
-    jobject thread_handle = JNIHandles::make_global(thread_oop);
+    thread_handle = JNIHandles::make_global(thread_oop);
+    JVMCI_ONLY(})
     _compiler2_objects[i] = thread_handle;
     _compiler2_logs[i] = NULL;
 
@@ -912,6 +937,39 @@
         (int)(available_cc_np / (128*K)));
 
     for (int i = old_c2_count; i < new_c2_count; i++) {
+#if INCLUDE_JVMCI
+      if (UseJVMCICompiler) {
+        // Native compiler threads as used in C1/C2 can reuse the j.l.Thread
+        // objects as their existence is completely hidden from the rest of
+        // the VM (and those compiler threads can't call Java code to do the
+        // creation anyway). For JVMCI we have to create new j.l.Thread objects
+        // as they are visible and we can see unexpected thread lifecycle
+        // transitions if we bind them to new JavaThreads.
+        if (!THREAD->can_call_java()) break;
+        char name_buffer[256];
+        sprintf(name_buffer, "%s CompilerThread%d", _compilers[1]->name(), i);
+        Handle thread_oop;
+        {
+          // We have to give up the lock temporarily for the Java calls.
+          MutexUnlocker mu(CompileThread_lock);
+          thread_oop = create_thread_oop(name_buffer, THREAD);
+        }
+        if (HAS_PENDING_EXCEPTION) {
+          if (TraceCompilerThreads) {
+            ResourceMark rm;
+            tty->print_cr("JVMCI compiler thread creation failed:");
+            PENDING_EXCEPTION->print();
+          }
+          CLEAR_PENDING_EXCEPTION;
+          break;
+        }
+        // Check if another thread has beaten us during the Java calls.
+        if (_compilers[1]->num_compiler_threads() != i) break;
+        jobject thread_handle = JNIHandles::make_global(thread_oop);
+        assert(compiler2_object(i) == NULL, "Old one must be released!");
+        _compiler2_objects[i] = thread_handle;
+      }
+#endif
       JavaThread *ct = make_thread(compiler2_object(i), _c2_compile_queue, _compilers[1], CHECK);
       if (ct == NULL) break;
       _compilers[1]->set_num_compiler_threads(i + 1);
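
The JVMCI branch added above relies on an unlock-then-revalidate idiom: CompileThread_lock is dropped around the Java upcall (MutexUnlocker), so any state observed before the call may be stale, and the code re-checks the thread count before publishing the new handle. A standalone analogue with std::mutex; all names here are illustrative, not HotSpot API:

    #include <mutex>

    std::mutex compile_lock;       // stands in for CompileThread_lock
    int num_compiler_threads = 0;  // shared state guarded by compile_lock

    // Precondition: the caller holds compile_lock.
    // Returns false if another thread won the race while we were unlocked.
    bool add_thread_at(int expected_index, void* (*blocking_create)()) {
      compile_lock.unlock();             // give up the lock for the blocking call
      void* handle = blocking_create();  // may block, may run arbitrary code
      compile_lock.lock();               // reacquire before touching shared state
      if (num_compiler_threads != expected_index) {
        return false;                    // world changed while unlocked: back off
      }
      num_compiler_threads = expected_index + 1;
      (void)handle;                      // a real version would store the handle
      return true;
    }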
--- a/src/hotspot/share/compiler/compileBroker.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/compiler/compileBroker.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -395,6 +395,8 @@
     return _compiler2_objects[idx];
   }
 
+  static bool can_remove(CompilerThread *ct, bool do_it);
+
   static CompileLog* get_log(CompilerThread* ct);
 
   static int get_total_compile_count() {            return _total_compile_count; }
--- a/src/hotspot/share/compiler/compileTask.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/compiler/compileTask.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -340,7 +340,7 @@
   if (_osr_bci != CompileBroker::standard_entry_bci) {
     log->print(" compile_kind='osr'");  // same as nmethod::compile_kind
   } // else compile_kind='c2c'
-  if (!method.is_null())  log->method(method);
+  if (!method.is_null())  log->method(method());
   if (_osr_bci != CompileBroker::standard_entry_bci) {
     log->print(" osr_bci='%d'", _osr_bci);
   }
@@ -356,21 +356,16 @@
 // ------------------------------------------------------------------
 // CompileTask::log_task_queued
 void CompileTask::log_task_queued() {
-  Thread* thread = Thread::current();
   ttyLocker ttyl;
-  ResourceMark rm(thread);
+  ResourceMark rm;
 
   xtty->begin_elem("task_queued");
   log_task(xtty);
   assert(_compile_reason > CompileTask::Reason_None && _compile_reason < CompileTask::Reason_Count, "Valid values");
   xtty->print(" comment='%s'", reason_name(_compile_reason));
 
-  if (_hot_method != NULL) {
-    methodHandle hot(thread, _hot_method);
-    methodHandle method(thread, _method);
-    if (hot() != method()) {
-      xtty->method(hot);
-    }
+  if (_hot_method != NULL && _hot_method != _method) {
+    xtty->method(_hot_method);
   }
   if (_hot_count != 0) {
     xtty->print(" hot_count='%d'", _hot_count);
--- a/src/hotspot/share/compiler/compilerDefinitions.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/compiler/compilerDefinitions.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -54,10 +54,13 @@
     } else if (strcmp(CompilationMode, "high-only-quick-internal") == 0) {
       _high_only_quick_internal = true;
     } else {
-        jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', supported modes are: quick-only, high-only, high-only-quick-internal\n", CompilationMode);
-        return false;
-      }
+      jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', supported modes are: quick-only, high-only, high-only-quick-internal\n", CompilationMode);
+      return false;
     }
+    if (disable_intermediate()) {
+      CompLevel_initial_compile = CompLevel_full_optimization;
+    }
+  }
   return true;
 }
 
--- a/src/hotspot/share/compiler/tieredThresholdPolicy.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/compiler/tieredThresholdPolicy.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -43,7 +43,7 @@
 #include "c1/c1_Compiler.hpp"
 #include "opto/c2compiler.hpp"
 
-bool TieredThresholdPolicy::call_predicate_helper(Method* method, CompLevel cur_level, int i, int b, double scale) {
+bool TieredThresholdPolicy::call_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
   double threshold_scaling;
   if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
     scale *= threshold_scaling;
@@ -74,7 +74,7 @@
   }
 }
 
-bool TieredThresholdPolicy::loop_predicate_helper(Method* method, CompLevel cur_level, int i, int b, double scale) {
+bool TieredThresholdPolicy::loop_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
   double threshold_scaling;
   if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) {
     scale *= threshold_scaling;
@@ -110,7 +110,7 @@
   return false;
 }
 
-bool TieredThresholdPolicy::force_comp_at_level_simple(Method* method) {
+bool TieredThresholdPolicy::force_comp_at_level_simple(const methodHandle& method) {
   if (CompilationModeFlag::quick_internal()) {
 #if INCLUDE_JVMCI
     if (UseJVMCICompiler) {
@@ -132,10 +132,10 @@
   return CompLevel_none;
 }
 
-void TieredThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) {
-  int invocation_count = mh->invocation_count();
-  int backedge_count = mh->backedge_count();
-  MethodData* mdh = mh->method_data();
+void TieredThresholdPolicy::print_counters(const char* prefix, Method* m) {
+  int invocation_count = m->invocation_count();
+  int backedge_count = m->backedge_count();
+  MethodData* mdh = m->method_data();
   int mdo_invocations = 0, mdo_backedges = 0;
   int mdo_invocations_start = 0, mdo_backedges_start = 0;
   if (mdh != NULL) {
@@ -149,13 +149,13 @@
       mdo_invocations, mdo_invocations_start,
       mdo_backedges, mdo_backedges_start);
   tty->print(" %smax levels=%d,%d", prefix,
-      mh->highest_comp_level(), mh->highest_osr_comp_level());
+      m->highest_comp_level(), m->highest_osr_comp_level());
 }
 
 // Print an event.
-void TieredThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh,
+void TieredThresholdPolicy::print_event(EventType type, Method* m, Method* im,
                                         int bci, CompLevel level) {
-  bool inlinee_event = mh() != imh();
+  bool inlinee_event = m != im;
 
   ttyLocker tty_lock;
   tty->print("%lf: [", os::elapsedTime());
@@ -189,10 +189,10 @@
   tty->print(" level=%d ", level);
 
   ResourceMark rm;
-  char *method_name = mh->name_and_sig_as_C_string();
+  char *method_name = m->name_and_sig_as_C_string();
   tty->print("[%s", method_name);
   if (inlinee_event) {
-    char *inlinee_name = imh->name_and_sig_as_C_string();
+    char *inlinee_name = im->name_and_sig_as_C_string();
     tty->print(" [%s]] ", inlinee_name);
   }
   else tty->print("] ");
@@ -200,39 +200,39 @@
                                       CompileBroker::queue_size(CompLevel_full_optimization));
 
   tty->print(" rate=");
-  if (mh->prev_time() == 0) tty->print("n/a");
-  else tty->print("%f", mh->rate());
+  if (m->prev_time() == 0) tty->print("n/a");
+  else tty->print("%f", m->rate());
 
   tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                                threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
 
   if (type != COMPILE) {
-    print_counters("", mh);
+    print_counters("", m);
     if (inlinee_event) {
-      print_counters("inlinee ", imh);
+      print_counters("inlinee ", im);
     }
     tty->print(" compilable=");
     bool need_comma = false;
-    if (!mh->is_not_compilable(CompLevel_full_profile)) {
+    if (!m->is_not_compilable(CompLevel_full_profile)) {
       tty->print("c1");
       need_comma = true;
     }
-    if (!mh->is_not_osr_compilable(CompLevel_full_profile)) {
+    if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
       if (need_comma) tty->print(",");
       tty->print("c1-osr");
       need_comma = true;
     }
-    if (!mh->is_not_compilable(CompLevel_full_optimization)) {
+    if (!m->is_not_compilable(CompLevel_full_optimization)) {
       if (need_comma) tty->print(",");
       tty->print("c2");
       need_comma = true;
     }
-    if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) {
+    if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
       if (need_comma) tty->print(",");
       tty->print("c2-osr");
     }
     tty->print(" status=");
-    if (mh->queued_for_compilation()) {
+    if (m->queued_for_compilation()) {
       tty->print("in-queue");
     } else tty->print("idle");
   }
@@ -376,12 +376,14 @@
     max_method = max_task->method();
   }
 
+  methodHandle max_method_h(Thread::current(), max_method);
+
   if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile &&
       TieredStopAtLevel > CompLevel_full_profile &&
-      max_method != NULL && is_method_profiled(max_method)) {
+      max_method != NULL && is_method_profiled(max_method_h)) {
     max_task->set_comp_level(CompLevel_limited_profile);
 
-    if (CompileBroker::compilation_is_complete(max_method, max_task->osr_bci(), CompLevel_limited_profile)) {
+    if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
       if (PrintTieredEvents) {
         print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
       }
@@ -401,8 +403,7 @@
 void TieredThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
   for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
     if (PrintTieredEvents) {
-      methodHandle mh(sd->method());
-      print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none);
+      print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
     }
     MethodData* mdo = sd->method()->method_data();
     if (mdo != NULL) {
@@ -430,7 +431,7 @@
   }
 
   if (PrintTieredEvents) {
-    print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
+    print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
   }
 
   if (bci == InvocationEntryBci) {
@@ -481,7 +482,7 @@
   if (level == CompLevel_aot) {
     if (mh->has_aot_code()) {
       if (PrintTieredEvents) {
-        print_event(COMPILE, mh, mh, bci, level);
+        print_event(COMPILE, mh(), mh(), bci, level);
       }
       MutexLocker ml(Compile_lock);
       NoSafepointVerifier nsv;
@@ -525,7 +526,7 @@
   }
   if (!CompileBroker::compilation_is_in_queue(mh)) {
     if (PrintTieredEvents) {
-      print_event(COMPILE, mh, mh, bci, level);
+      print_event(COMPILE, mh(), mh(), bci, level);
     }
     int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
     update_rate(os::javaTimeMillis(), mh());
@@ -610,7 +611,7 @@
 }
 
 // Is method profiled enough?
-bool TieredThresholdPolicy::is_method_profiled(Method* method) {
+bool TieredThresholdPolicy::is_method_profiled(const methodHandle& method) {
   MethodData* mdo = method->method_data();
   if (mdo != NULL) {
     int i = mdo->invocation_count_delta();
@@ -647,7 +648,7 @@
 // Tier?LoadFeedback is basically a coefficient that determines
 // how many methods per compiler thread can be in the queue before
 // the threshold values double.
-bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) {
+bool TieredThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, const methodHandle& method) {
   double k = 1;
   switch(cur_level) {
   case CompLevel_aot: {
@@ -672,10 +673,10 @@
   default:
     return true;
   }
- return loop_predicate_helper(method, cur_level, i, b, k);
+  return loop_predicate_helper(method, cur_level, i, b, k);
 }
 
-bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) {
+bool TieredThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, const methodHandle& method) {
   double k = 1;
   switch(cur_level) {
   case CompLevel_aot: {
@@ -705,14 +706,15 @@
 
 // Determine if a method is mature.
 bool TieredThresholdPolicy::is_mature(Method* method) {
-  if (is_trivial(method) || force_comp_at_level_simple(method)) return true;
+  methodHandle mh(Thread::current(), method);
+  if (is_trivial(method) || force_comp_at_level_simple(mh)) return true;
   MethodData* mdo = method->method_data();
   if (mdo != NULL) {
     int i = mdo->invocation_count();
     int b = mdo->backedge_count();
     double k = ProfileMaturityPercentage / 100.0;
     CompLevel main_profile_level = CompilationModeFlag::disable_intermediate() ? CompLevel_none : CompLevel_full_profile;
-    return call_predicate_helper(method, main_profile_level, i, b, k) || loop_predicate_helper(method, main_profile_level, i, b, k);
+    return call_predicate_helper(mh, main_profile_level, i, b, k) || loop_predicate_helper(mh, main_profile_level, i, b, k);
   }
   return false;
 }
@@ -720,7 +722,7 @@
 // If a method is old enough and is still in the interpreter we would want to
 // start profiling without waiting for the compiled method to arrive.
 // We also take the load on compilers into account.
-bool TieredThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
+bool TieredThresholdPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
   if (cur_level != CompLevel_none || force_comp_at_level_simple(method)) {
     return false;
   }
@@ -799,7 +801,7 @@
  */
 
 // Common transition function. Given a predicate determines if a method should transition to another level.
-CompLevel TieredThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
+CompLevel TieredThresholdPolicy::common(Predicate p, const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
   CompLevel next_level = cur_level;
   int i = method->invocation_count();
   int b = method->backedge_count();
@@ -807,7 +809,7 @@
   if (force_comp_at_level_simple(method)) {
     next_level = CompLevel_simple;
   } else {
-    if (!CompilationModeFlag::disable_intermediate() && is_trivial(method)) {
+    if (!CompilationModeFlag::disable_intermediate() && is_trivial(method())) {
       next_level = CompLevel_simple;
     } else {
       switch(cur_level) {
@@ -926,7 +928,7 @@
 }
 
 // Determine if a method should be compiled with a normal entry point at a different level.
-CompLevel TieredThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread* thread) {
+CompLevel TieredThresholdPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* thread) {
   CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                              common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true));
   CompLevel next_level = common(&TieredThresholdPolicy::call_predicate, method, cur_level);
@@ -947,7 +949,7 @@
 }
 
 // Determine if we should do an OSR compilation of a given method.
-CompLevel TieredThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) {
+CompLevel TieredThresholdPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* thread) {
   CompLevel next_level = common(&TieredThresholdPolicy::loop_predicate, method, cur_level, true);
   if (cur_level == CompLevel_none) {
     // If there is a live OSR method that means that we deopted to the interpreter
@@ -983,10 +985,10 @@
 // Handle the invocation event.
 void TieredThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
                                                       CompLevel level, CompiledMethod* nm, JavaThread* thread) {
-  if (should_create_mdo(mh(), level)) {
+  if (should_create_mdo(mh, level)) {
     create_mdo(mh, thread);
   }
-  CompLevel next_level = call_event(mh(), level, thread);
+  CompLevel next_level = call_event(mh, level, thread);
   if (next_level != level) {
     if (maybe_switch_to_aot(mh, level, next_level, thread)) {
       // No JITting necessary
@@ -1002,16 +1004,16 @@
 // with a regular entry from here.
 void TieredThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
                                                      int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) {
-  if (should_create_mdo(mh(), level)) {
+  if (should_create_mdo(mh, level)) {
     create_mdo(mh, thread);
   }
   // Check if MDO should be created for the inlined method
-  if (should_create_mdo(imh(), level)) {
+  if (should_create_mdo(imh, level)) {
     create_mdo(imh, thread);
   }
 
   if (is_compilation_enabled()) {
-    CompLevel next_osr_level = loop_event(imh(), level, thread);
+    CompLevel next_osr_level = loop_event(imh, level, thread);
     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
     // At the very least compile the OSR version
     if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
@@ -1032,7 +1034,7 @@
         // Current loop event level is not AOT
         guarantee(nm != NULL, "Should have nmethod here");
         cur_level = comp_level(mh());
-        next_level = call_event(mh(), cur_level, thread);
+        next_level = call_event(mh, cur_level, thread);
 
         if (max_osr_level == CompLevel_full_optimization) {
           // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
@@ -1068,7 +1070,7 @@
       }
     } else {
       cur_level = comp_level(mh());
-      next_level = call_event(mh(), cur_level, thread);
+      next_level = call_event(mh, cur_level, thread);
       if (next_level != cur_level) {
         if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) {
           compile(mh, InvocationEntryBci, next_level, thread);
--- a/src/hotspot/share/compiler/tieredThresholdPolicy.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/compiler/tieredThresholdPolicy.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -173,19 +173,19 @@
   // Call and loop predicates determine whether a transition to a higher compilation
   // level should be performed (pointers to predicate functions are passed to common_TF()).
   // Predicates also take compiler load into account.
-  typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method);
-  bool call_predicate(int i, int b, CompLevel cur_level, Method* method);
-  bool loop_predicate(int i, int b, CompLevel cur_level, Method* method);
+  typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, const methodHandle& method);
+  bool call_predicate(int i, int b, CompLevel cur_level, const methodHandle& method);
+  bool loop_predicate(int i, int b, CompLevel cur_level, const methodHandle& method);
   // Common transition function. Given a predicate determines if a method should transition to another level.
-  CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false);
+  CompLevel common(Predicate p, const methodHandle& method, CompLevel cur_level, bool disable_feedback = false);
   // Transition functions.
   // call_event determines if a method should be compiled at a different
   // level with a regular invocation entry.
-  CompLevel call_event(Method* method, CompLevel cur_level, JavaThread* thread);
+  CompLevel call_event(const methodHandle& method, CompLevel cur_level, JavaThread* thread);
   // loop_event checks if a method should be OSR compiled at a different
   // level.
-  CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread* thread);
-  void print_counters(const char* prefix, const methodHandle& mh);
+  CompLevel loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* thread);
+  void print_counters(const char* prefix, Method* m);
   // Has a method been around long?
   // We don't remove old methods from the compile queue even if they have
   // very low activity (see select_task()).
@@ -205,11 +205,11 @@
   // If a method is old enough and is still in the interpreter we would want to
   // start profiling without waiting for the compiled method to arrive. This function
   // determines whether we should do that.
-  inline bool should_create_mdo(Method* method, CompLevel cur_level);
+  inline bool should_create_mdo(const methodHandle& method, CompLevel cur_level);
   // Create MDO if necessary.
   void create_mdo(const methodHandle& mh, JavaThread* thread);
   // Is method profiled enough?
-  bool is_method_profiled(Method* method);
+  bool is_method_profiled(const methodHandle& method);
 
   double _increase_threshold_at_ratio;
 
@@ -221,19 +221,19 @@
   void set_c2_count(int x) { _c2_count = x;    }
 
   enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
-  void print_event(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level);
+  void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level);
   // Check if the method can be compiled, change level if necessary
   void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread);
   // Simple methods are as good compiled with C1 as with C2.
   // This function tells whether a given method is such a method.
   inline static bool is_trivial(Method* method);
   // Force method to be compiled at CompLevel_simple?
-  inline bool force_comp_at_level_simple(Method* method);
+  inline bool force_comp_at_level_simple(const methodHandle& method);
 
   // Predicate helpers are used by .*_predicate() methods as well as others.
   // They check the given counter values, multiplied by the scale, against the thresholds.
-  inline bool call_predicate_helper(Method* method, CompLevel cur_level, int i, int b, double scale);
-  inline bool loop_predicate_helper(Method* method, CompLevel cur_level, int i, int b, double scale);
+  inline bool call_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale);
+  inline bool loop_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale);
 
   // Get a compilation level for a given method.
   static CompLevel comp_level(Method* method);
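
Taken together with the .cpp diff, the header change above states the convention this cleanup settles on: helpers that can reach handle-requiring code (for example via CompilerOracle::has_option_value, which now takes a const methodHandle&) accept const methodHandle& themselves, so the handle is created once by the caller; helpers that only read counters and print take a raw Method*. The two shapes, lifted straight from the signatures above:

    // May flow into handle-requiring code: take the caller's handle by const ref.
    bool call_predicate(int i, int b, CompLevel cur_level, const methodHandle& method);

    // Only reads and prints: a raw Method* is sufficient and cheaper.
    void print_counters(const char* prefix, Method* m);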
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/adaptiveFreeList.hpp"
-#include "gc/cms/freeChunk.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "memory/freeList.inline.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-
-template <>
-void AdaptiveFreeList<FreeChunk>::print_on(outputStream* st, const char* c) const {
-  if (c != NULL) {
-    st->print("%16s", c);
-  } else {
-    st->print(SIZE_FORMAT_W(16), size());
-  }
-  st->print("\t"
-           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t"
-           SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\t" SSIZE_FORMAT_W(14) "\n",
-           bfr_surp(),             surplus(),             desired(),             prev_sweep(),           before_sweep(),
-           count(),               coal_births(),          coal_deaths(),          split_births(),         split_deaths());
-}
-
-template <class Chunk>
-AdaptiveFreeList<Chunk>::AdaptiveFreeList() : FreeList<Chunk>(), _hint(0) {
-  init_statistics();
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::initialize() {
-  FreeList<Chunk>::initialize();
-  set_hint(0);
-  init_statistics(true /* split_birth */);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::reset(size_t hint) {
-  FreeList<Chunk>::reset();
-  set_hint(hint);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::init_statistics(bool split_birth) {
-  _allocation_stats.initialize(split_birth);
-}
-
-template <class Chunk>
-size_t AdaptiveFreeList<Chunk>::get_better_size() {
-
-  // A candidate chunk has been found.  If it is already under
-  // populated and there is a hinT, REturn the hint().  Else
-  // return the size of this chunk.
-  if (surplus() <= 0) {
-    if (hint() != 0) {
-      return hint();
-    } else {
-      return size();
-    }
-  } else {
-    // This list has a surplus so use it.
-    return size();
-  }
-}
-
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk) {
-  assert_proper_lock_protection();
-  return_chunk_at_head(chunk, true);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_head(Chunk* chunk, bool record_return) {
-  FreeList<Chunk>::return_chunk_at_head(chunk, record_return);
-#ifdef ASSERT
-  if (record_return) {
-    increment_returned_bytes_by(size()*HeapWordSize);
-  }
-#endif
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk) {
-  AdaptiveFreeList<Chunk>::return_chunk_at_tail(chunk, true);
-}
-
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::return_chunk_at_tail(Chunk* chunk, bool record_return) {
-  FreeList<Chunk>::return_chunk_at_tail(chunk, record_return);
-#ifdef ASSERT
-  if (record_return) {
-    increment_returned_bytes_by(size()*HeapWordSize);
-  }
-#endif
-}
-
-#ifndef PRODUCT
-template <class Chunk>
-void AdaptiveFreeList<Chunk>::verify_stats() const {
-  // The +1 of the LH comparand is to allow some "looseness" in
-  // checking: we usually call this interface when adding a block
-  // and we'll subsequently update the stats; we cannot update the
-  // stats beforehand because in the case of the large-block BT
-  // dictionary for example, this might be the first block and
-  // in that case there would be no place that we could record
-  // the stats (which are kept in the block itself).
-  assert((_allocation_stats.prev_sweep() + _allocation_stats.split_births()
-          + _allocation_stats.coal_births() + 1)   // Total Production Stock + 1
-         >= (_allocation_stats.split_deaths() + _allocation_stats.coal_deaths()
-             + (ssize_t)count()),                // Total Current Stock + depletion
-         "FreeList " PTR_FORMAT " of size " SIZE_FORMAT
-         " violates Conservation Principle: "
-         "prev_sweep(" SIZE_FORMAT ")"
-         " + split_births(" SIZE_FORMAT ")"
-         " + coal_births(" SIZE_FORMAT ") + 1 >= "
-         " split_deaths(" SIZE_FORMAT ")"
-         " coal_deaths(" SIZE_FORMAT ")"
-         " + count(" SSIZE_FORMAT ")",
-         p2i(this), size(), _allocation_stats.prev_sweep(), _allocation_stats.split_births(),
-         _allocation_stats.coal_births(), _allocation_stats.split_deaths(),
-         _allocation_stats.coal_deaths(), count());
-}
-#endif
-
-// Needs to be after the definitions have been seen.
-template class AdaptiveFreeList<FreeChunk>;
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,229 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
-#define SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
-
-#include "gc/cms/allocationStats.hpp"
-#include "memory/freeList.hpp"
-
-class CompactibleFreeListSpace;
-
-// A class for maintaining a free list of Chunk's.  The FreeList
-// maintains the structure of the list (head, tail, etc.) plus
-// statistics for allocations from the list.  The links between items
-// are not part of FreeList.  The statistics are
-// used to make decisions about coalescing Chunk's when they
-// are swept during collection.
-//
-// See the corresponding .cpp file for a description of the specifics
-// for that implementation.
-
-class Mutex;
-
-template <class Chunk>
-class AdaptiveFreeList : public FreeList<Chunk> {
-  friend class CompactibleFreeListSpace;
-  friend class VMStructs;
-  // friend class PrintTreeCensusClosure<Chunk, FreeList_t>;
-
-  size_t        _hint;          // next larger size list with a positive surplus
-
-  AllocationStats _allocation_stats; // allocation-related statistics
-
- public:
-
-  AdaptiveFreeList();
-
-  using FreeList<Chunk>::assert_proper_lock_protection;
-#ifdef ASSERT
-  using FreeList<Chunk>::protecting_lock;
-#endif
-  using FreeList<Chunk>::count;
-  using FreeList<Chunk>::size;
-  using FreeList<Chunk>::verify_chunk_in_free_list;
-  using FreeList<Chunk>::getFirstNChunksFromList;
-  using FreeList<Chunk>::print_on;
-  void return_chunk_at_head(Chunk* fc, bool record_return);
-  void return_chunk_at_head(Chunk* fc);
-  void return_chunk_at_tail(Chunk* fc, bool record_return);
-  void return_chunk_at_tail(Chunk* fc);
-  using FreeList<Chunk>::return_chunk_at_tail;
-  using FreeList<Chunk>::remove_chunk;
-  using FreeList<Chunk>::prepend;
-  using FreeList<Chunk>::print_labels_on;
-  using FreeList<Chunk>::get_chunk_at_head;
-
-  // Initialize.
-  void initialize();
-
-  // Reset the head, tail, hint, and count of a free list.
-  void reset(size_t hint);
-
-  void print_on(outputStream* st, const char* c = NULL) const;
-
-  size_t hint() const {
-    return _hint;
-  }
-  void set_hint(size_t v) {
-    assert_proper_lock_protection();
-    assert(v == 0 || size() < v, "Bad hint");
-    _hint = v;
-  }
-
-  size_t get_better_size();
-
-  // Accessors for statistics
-  void init_statistics(bool split_birth = false);
-
-  AllocationStats* allocation_stats() {
-    assert_proper_lock_protection();
-    return &_allocation_stats;
-  }
-
-  ssize_t desired() const {
-    return _allocation_stats.desired();
-  }
-  void set_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_desired(v);
-  }
-  void compute_desired(float inter_sweep_current,
-                       float inter_sweep_estimate,
-                       float intra_sweep_estimate) {
-    assert_proper_lock_protection();
-    _allocation_stats.compute_desired(count(),
-                                      inter_sweep_current,
-                                      inter_sweep_estimate,
-                                      intra_sweep_estimate);
-  }
-  ssize_t coal_desired() const {
-    return _allocation_stats.coal_desired();
-  }
-  void set_coal_desired(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_desired(v);
-  }
-
-  ssize_t surplus() const {
-    return _allocation_stats.surplus();
-  }
-  void set_surplus(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_surplus(v);
-  }
-  void increment_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_surplus();
-  }
-  void decrement_surplus() {
-    assert_proper_lock_protection();
-    _allocation_stats.decrement_surplus();
-  }
-
-  ssize_t bfr_surp() const {
-    return _allocation_stats.bfr_surp();
-  }
-  void set_bfr_surp(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_bfr_surp(v);
-  }
-  ssize_t prev_sweep() const {
-    return _allocation_stats.prev_sweep();
-  }
-  void set_prev_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_prev_sweep(v);
-  }
-  ssize_t before_sweep() const {
-    return _allocation_stats.before_sweep();
-  }
-  void set_before_sweep(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_before_sweep(v);
-  }
-
-  ssize_t coal_births() const {
-    return _allocation_stats.coal_births();
-  }
-  void set_coal_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_births(v);
-  }
-  void increment_coal_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_births();
-  }
-
-  ssize_t coal_deaths() const {
-    return _allocation_stats.coal_deaths();
-  }
-  void set_coal_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_coal_deaths(v);
-  }
-  void increment_coal_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_coal_deaths();
-  }
-
-  ssize_t split_births() const {
-    return _allocation_stats.split_births();
-  }
-  void set_split_births(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_births(v);
-  }
-  void increment_split_births() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_births();
-  }
-
-  ssize_t split_deaths() const {
-    return _allocation_stats.split_deaths();
-  }
-  void set_split_deaths(ssize_t v) {
-    assert_proper_lock_protection();
-    _allocation_stats.set_split_deaths(v);
-  }
-  void increment_split_deaths() {
-    assert_proper_lock_protection();
-    _allocation_stats.increment_split_deaths();
-  }
-
-#ifndef PRODUCT
-  // For debugging.  The "_returned_bytes" in all the lists are summed
-  // and compared with the total number of bytes swept during a
-  // collection.
-  size_t returned_bytes() const { return _allocation_stats.returned_bytes(); }
-  void set_returned_bytes(size_t v) { _allocation_stats.set_returned_bytes(v); }
-  void increment_returned_bytes_by(size_t v) {
-    _allocation_stats.set_returned_bytes(_allocation_stats.returned_bytes() + v);
-  }
-  // Stats verification
-  void verify_stats() const;
-#endif  // NOT PRODUCT
-};
-
-#endif // SHARE_GC_CMS_ADAPTIVEFREELIST_HPP
--- a/src/hotspot/share/gc/cms/allocationStats.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/allocationStats.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/ostream.hpp"
-
-// Technically this should be derived from machine speed, and
-// ideally it would be dynamically adjusted.
-float AllocationStats::_threshold = ((float)CMS_SweepTimerThresholdMillis)/1000;
-
-void AllocationStats::initialize(bool split_birth)   {
-  AdaptivePaddedAverage* dummy =
-    new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
-                                                       CMS_FLSPadding);
-  _desired = 0;
-  _coal_desired = 0;
-  _surplus = 0;
-  _bfr_surp = 0;
-  _prev_sweep = 0;
-  _before_sweep = 0;
-  _coal_births = 0;
-  _coal_deaths = 0;
-  _split_births = (split_birth ? 1 : 0);
-  _split_deaths = 0;
-  _returned_bytes = 0;
-}
--- a/src/hotspot/share/gc/cms/allocationStats.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,154 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_ALLOCATIONSTATS_HPP
-#define SHARE_GC_CMS_ALLOCATIONSTATS_HPP
-
-#include "gc/shared/gcUtil.hpp"
-#include "logging/log.hpp"
-#include "runtime/globals.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-
-class AllocationStats {
-  // A duration threshold (in ms) used to filter
-  // possibly unreliable samples.
-  static float _threshold;
-
-  // We measure the demand between the end of the previous sweep and
-  // beginning of this sweep:
-  //   Count(end_last_sweep) - Count(start_this_sweep)
-  //     + split_births(between) - split_deaths(between)
-  // The above number divided by the time since the end of the
-  // previous sweep gives us a time rate of demand for blocks
-  // of this size. We compute a padded average of this rate as
-  // our current estimate for the time rate of demand for blocks
-  // of this size. Similarly, we keep a padded average for the time
-  // between sweeps. Our current estimate for demand for blocks of
-  // this size is then simply computed as the product of these two
-  // estimates.
-  AdaptivePaddedAverage _demand_rate_estimate;
-
-  ssize_t     _desired;          // Demand estimate computed as described above
-  ssize_t     _coal_desired;     // desired +/- small-percent for tuning coalescing
-
-  ssize_t     _surplus;          // count - (desired +/- small-percent),
-                                 // used to tune splitting in best fit
-  ssize_t     _bfr_surp;         // surplus at start of current sweep
-  ssize_t     _prev_sweep;       // count from end of previous sweep
-  ssize_t     _before_sweep;     // count from before current sweep
-  ssize_t     _coal_births;      // additional chunks from coalescing
-  ssize_t     _coal_deaths;      // loss from coalescing
-  ssize_t     _split_births;     // additional chunks from splitting
-  ssize_t     _split_deaths;     // loss from splitting
-  size_t      _returned_bytes;   // number of bytes returned to list.
- public:
-  void initialize(bool split_birth = false);
-
-  AllocationStats() {
-    initialize();
-  }
-
-  // The rate estimate is in blocks per second.
-  void compute_desired(size_t count,
-                       float inter_sweep_current,
-                       float inter_sweep_estimate,
-                       float intra_sweep_estimate) {
-    // If the latest inter-sweep time is below our granularity
-    // of measurement, we may call in here with
-    // inter_sweep_current == 0. However, even for suitably small
-    // but non-zero inter-sweep durations, we may not trust the accuracy
-    // of accumulated data, since it has not been "integrated"
-    // (read "low-pass-filtered") long enough, and would be
-    // vulnerable to noisy glitches. In such cases, we
-    // ignore the current sample and use currently available
-    // historical estimates.
-    assert(prev_sweep() + split_births() + coal_births()        // "Total Production Stock"
-           >= split_deaths() + coal_deaths() + (ssize_t)count, // "Current stock + depletion"
-           "Conservation Principle");
-    if (inter_sweep_current > _threshold) {
-      ssize_t demand = prev_sweep() - (ssize_t)count + split_births() + coal_births()
-                       - split_deaths() - coal_deaths();
-      assert(demand >= 0,
-             "Demand (" SSIZE_FORMAT ") should be non-negative for "
-             PTR_FORMAT " (size=" SIZE_FORMAT ")",
-             demand, p2i(this), count);
-      // Defensive: adjust for imprecision in event counting
-      if (demand < 0) {
-        demand = 0;
-      }
-      float old_rate = _demand_rate_estimate.padded_average();
-      float rate = ((float)demand)/inter_sweep_current;
-      _demand_rate_estimate.sample(rate);
-      float new_rate = _demand_rate_estimate.padded_average();
-      ssize_t old_desired = _desired;
-      float delta_ise = (CMSExtrapolateSweep ? intra_sweep_estimate : 0.0);
-      _desired = (ssize_t)(new_rate * (inter_sweep_estimate + delta_ise));
-      log_trace(gc, freelist)("demand: " SSIZE_FORMAT ", old_rate: %f, current_rate: %f, "
-                              "new_rate: %f, old_desired: " SSIZE_FORMAT ", new_desired: " SSIZE_FORMAT,
-                              demand, old_rate, rate, new_rate, old_desired, _desired);
-    }
-  }
-
-  ssize_t desired() const { return _desired; }
-  void set_desired(ssize_t v) { _desired = v; }
-
-  ssize_t coal_desired() const { return _coal_desired; }
-  void set_coal_desired(ssize_t v) { _coal_desired = v; }
-
-  ssize_t surplus() const { return _surplus; }
-  void set_surplus(ssize_t v) { _surplus = v; }
-  void increment_surplus() { _surplus++; }
-  void decrement_surplus() { _surplus--; }
-
-  ssize_t bfr_surp() const { return _bfr_surp; }
-  void set_bfr_surp(ssize_t v) { _bfr_surp = v; }
-  ssize_t prev_sweep() const { return _prev_sweep; }
-  void set_prev_sweep(ssize_t v) { _prev_sweep = v; }
-  ssize_t before_sweep() const { return _before_sweep; }
-  void set_before_sweep(ssize_t v) { _before_sweep = v; }
-
-  ssize_t coal_births() const { return _coal_births; }
-  void set_coal_births(ssize_t v) { _coal_births = v; }
-  void increment_coal_births() { _coal_births++; }
-
-  ssize_t coal_deaths() const { return _coal_deaths; }
-  void set_coal_deaths(ssize_t v) { _coal_deaths = v; }
-  void increment_coal_deaths() { _coal_deaths++; }
-
-  ssize_t split_births() const { return _split_births; }
-  void set_split_births(ssize_t v) { _split_births = v; }
-  void increment_split_births() { _split_births++; }
-
-  ssize_t split_deaths() const { return _split_deaths; }
-  void set_split_deaths(ssize_t v) { _split_deaths = v; }
-  void increment_split_deaths() { _split_deaths++; }
-
-  NOT_PRODUCT(
-    size_t returned_bytes() const { return _returned_bytes; }
-    void set_returned_bytes(size_t v) { _returned_bytes = v; }
-  )
-};
-
-#endif // SHARE_GC_CMS_ALLOCATIONSTATS_HPP
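For reference, the demand-rate logic in AllocationStats::compute_desired() reduces to a few lines of arithmetic. Below is a minimal C++ sketch, assuming a simplified PaddedAverage stand-in for AdaptivePaddedAverage (the real class lives in gc/shared/gcUtil.hpp; the decay/padding model here is an illustration, not its implementation):

#include <cmath>
#include <cstddef>

// Stand-in for AdaptivePaddedAverage (an assumption, not the HotSpot
// implementation): exponentially decaying average, padded by a multiple
// of the exponentially decaying deviation.
struct PaddedAverage {
  float weight;   // decay weight in (0,1]
  float padding;  // number of deviations to pad by
  float avg = 0.0f;
  float dev = 0.0f;
  void sample(float v) {
    avg = (1.0f - weight) * avg + weight * v;
    dev = (1.0f - weight) * dev + weight * std::fabs(v - avg);
  }
  float padded_average() const { return avg + padding * dev; }
};

// Mirrors the arithmetic of compute_desired() above: demand over the last
// inter-sweep interval, turned into a rate, smoothed, then scaled by the
// expected length of the next interval.
ptrdiff_t desired_count(PaddedAverage& rate_estimate,
                        ptrdiff_t prev_sweep, size_t count,
                        ptrdiff_t split_births, ptrdiff_t coal_births,
                        ptrdiff_t split_deaths, ptrdiff_t coal_deaths,
                        float inter_sweep_current,   // must exceed _threshold
                        float inter_sweep_estimate,
                        float intra_sweep_estimate) {
  ptrdiff_t demand = prev_sweep - (ptrdiff_t)count
                   + split_births + coal_births
                   - split_deaths - coal_deaths;
  if (demand < 0) demand = 0;  // defensive clamp, as in the original
  rate_estimate.sample((float)demand / inter_sweep_current);
  return (ptrdiff_t)(rate_estimate.padded_average() *
                     (inter_sweep_estimate + intra_sweep_estimate));
}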
--- a/src/hotspot/share/gc/cms/cmsArguments.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsArguments.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/gcArguments.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/defaultStream.hpp"
-
-void CMSArguments::set_parnew_gc_flags() {
-  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC && !UseG1GC,
-         "control point invariant");
-  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
-  if (FLAG_IS_DEFAULT(ParallelGCThreads)) {
-    FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
-    assert(ParallelGCThreads > 0, "We should always have at least one thread by default");
-  } else if (ParallelGCThreads == 0) {
-    jio_fprintf(defaultStream::error_stream(),
-        "The ParNew GC can not be combined with -XX:ParallelGCThreads=0\n");
-    vm_exit(1);
-  }
-
-  // By default YoungPLABSize and OldPLABSize are set to 4096 and 1024 respectively,
-  // these settings are default for Parallel Scavenger. For ParNew+Tenured configuration
-  // we set them to 1024 and 1024.
-  // See CR 6362902.
-  if (FLAG_IS_DEFAULT(YoungPLABSize)) {
-    FLAG_SET_DEFAULT(YoungPLABSize, (intx)1024);
-  }
-  if (FLAG_IS_DEFAULT(OldPLABSize)) {
-    FLAG_SET_DEFAULT(OldPLABSize, (intx)1024);
-  }
-
-  // When using compressed oops, we use local overflow stacks,
-  // rather than using a global overflow list chained through
-  // the klass word of the object's pre-image.
-  if (UseCompressedOops && !ParGCUseLocalOverflow) {
-    if (!FLAG_IS_DEFAULT(ParGCUseLocalOverflow)) {
-      warning("Forcing +ParGCUseLocalOverflow: needed if using compressed references");
-    }
-    FLAG_SET_DEFAULT(ParGCUseLocalOverflow, true);
-  }
-  assert(ParGCUseLocalOverflow || !UseCompressedOops, "Error");
-}
-
-// Adjust some sizes to suit CMS and/or ParNew needs; these work well on
-// sparc/solaris for certain applications, but would benefit from further
-// optimization and tuning, and almost certainly from analysis of the
-// platform and environment.
-void CMSArguments::initialize() {
-  GCArguments::initialize();
-
-  assert(!UseSerialGC && !UseParallelOldGC && !UseParallelGC, "Error");
-  assert(UseConcMarkSweepGC, "CMS is expected to be on here");
-
-  // CMS space iteration, which FLSVerifyAllHeapReferences entails,
-  // insists that we hold the requisite locks so that the iteration is
-  // MT-safe. For the verification at start-up and shut-down, we don't
-  // yet have a good way of acquiring and releasing these locks,
-  // which are not visible at the CollectedHeap level. We want to
-  // be able to acquire these locks and then do the iteration rather
-  // than just disable the lock verification. This will be fixed under
-  // bug 4788986.
-  if (UseConcMarkSweepGC && FLSVerifyAllHeapReferences) {
-    if (VerifyDuringStartup) {
-      warning("Heap verification at start-up disabled "
-              "(due to current incompatibility with FLSVerifyAllHeapReferences)");
-      VerifyDuringStartup = false; // Disable verification at start-up
-    }
-
-    if (VerifyBeforeExit) {
-      warning("Heap verification at shutdown disabled "
-              "(due to current incompatibility with FLSVerifyAllHeapReferences)");
-      VerifyBeforeExit = false; // Disable verification at shutdown
-    }
-  }
-
-  if (!ClassUnloading) {
-    FLAG_SET_CMDLINE(CMSClassUnloadingEnabled, false);
-  }
-
-  // Set CMS global values
-  CompactibleFreeListSpace::set_cms_values();
-
-  // Turn off AdaptiveSizePolicy by default for cms until it is complete.
-  disable_adaptive_size_policy("UseConcMarkSweepGC");
-
-  set_parnew_gc_flags();
-
-  size_t max_heap = align_down(MaxHeapSize,
-                               CardTableRS::ct_max_alignment_constraint());
-
-  // Now make adjustments for CMS
-  intx   tenuring_default = (intx)6;
-  size_t young_gen_per_worker = CMSYoungGenPerWorker;
-
-  // Preferred young gen size for "short" pauses:
-  // upper bound depends on # of threads and NewRatio.
-  const size_t preferred_max_new_size_unaligned =
-    MIN2(max_heap/(NewRatio+1), ScaleForWordSize(young_gen_per_worker * ParallelGCThreads));
-  size_t preferred_max_new_size =
-    align_up(preferred_max_new_size_unaligned, os::vm_page_size());
-
-  // Unless explicitly requested otherwise, size young gen
-  // for "short" pauses ~ CMSYoungGenPerWorker*ParallelGCThreads
-
-  // If either MaxNewSize or NewRatio is set on the command line,
-  // assume the user is trying to set the size of the young gen.
-  if (FLAG_IS_DEFAULT(MaxNewSize) && FLAG_IS_DEFAULT(NewRatio)) {
-
-    // Set MaxNewSize to our calculated preferred_max_new_size unless
-    // NewSize was set on the command line and it is larger than
-    // preferred_max_new_size.
-    if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
-      FLAG_SET_ERGO(MaxNewSize, MAX2(NewSize, preferred_max_new_size));
-    } else {
-      FLAG_SET_ERGO(MaxNewSize, preferred_max_new_size);
-    }
-    log_trace(gc, heap)("CMS ergo set MaxNewSize: " SIZE_FORMAT, MaxNewSize);
-
-    // Code along this path potentially sets NewSize and OldSize
-    log_trace(gc, heap)("CMS set min_heap_size: " SIZE_FORMAT " initial_heap_size:  " SIZE_FORMAT " max_heap: " SIZE_FORMAT,
-                        MinHeapSize, InitialHeapSize, max_heap);
-    size_t min_new = preferred_max_new_size;
-    if (FLAG_IS_CMDLINE(NewSize)) {
-      min_new = NewSize;
-    }
-    if (max_heap > min_new && MinHeapSize > min_new) {
-      // Unless explicitly requested otherwise, make young gen
-      // at least min_new, and at most preferred_max_new_size.
-      if (FLAG_IS_DEFAULT(NewSize)) {
-        FLAG_SET_ERGO(NewSize, MAX2(NewSize, min_new));
-        FLAG_SET_ERGO(NewSize, MIN2(preferred_max_new_size, NewSize));
-        log_trace(gc, heap)("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
-      }
-      // Unless explicitly requested otherwise, size old gen
-      // so it's NewRatio x of NewSize.
-      if (FLAG_IS_DEFAULT(OldSize)) {
-        if (max_heap > NewSize) {
-          FLAG_SET_ERGO(OldSize, MIN2(NewRatio*NewSize, max_heap - NewSize));
-          log_trace(gc, heap)("CMS ergo set OldSize: " SIZE_FORMAT, OldSize);
-        }
-      }
-    }
-  }
-  // Unless explicitly requested otherwise, definitely
-  // promote all objects surviving "tenuring_default" scavenges.
-  if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
-      FLAG_IS_DEFAULT(SurvivorRatio)) {
-    FLAG_SET_ERGO(MaxTenuringThreshold, tenuring_default);
-  }
-  // If we decided above (or user explicitly requested)
-  // `promote all' (via MaxTenuringThreshold := 0),
-  // prefer minuscule survivor spaces so as not to waste
-  // space for (non-existent) survivors
-  if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
-    FLAG_SET_ERGO(SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
-  }
-
-  // OldPLABSize is interpreted in CMS not as the size of the PLAB in words,
-  // but rather as the number of free blocks of a given size that are used when
-  // replenishing the local per-worker free list caches.
-  if (FLAG_IS_DEFAULT(OldPLABSize)) {
-    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
-      // OldPLAB sizing manually turned off: Use a larger default setting,
-      // unless it was manually specified. This is because a too-low value
-      // will slow down scavenges.
-      FLAG_SET_ERGO(OldPLABSize, CompactibleFreeListSpaceLAB::_default_static_old_plab_size); // default value before 6631166
-    } else {
-      FLAG_SET_DEFAULT(OldPLABSize, CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
-    }
-  }
-
-  // If either of the static initialization defaults has changed, note this
-  // modification.
-  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
-    CompactibleFreeListSpaceLAB::modify_initialization(OldPLABSize, OldPLABWeight);
-  }
-
-  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
-}
-
-void CMSArguments::disable_adaptive_size_policy(const char* collector_name) {
-  if (UseAdaptiveSizePolicy) {
-    if (FLAG_IS_CMDLINE(UseAdaptiveSizePolicy)) {
-      warning("Disabling UseAdaptiveSizePolicy; it is incompatible with %s.",
-              collector_name);
-    }
-    FLAG_SET_DEFAULT(UseAdaptiveSizePolicy, false);
-  }
-}
-
-CollectedHeap* CMSArguments::create_heap() {
-  return new CMSHeap();
-}
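The young-gen cap computed in CMSArguments::initialize() is the minimum of two bounds, rounded up to a page. A minimal sketch, assuming a plain integer round-up in place of align_up and ignoring ScaleForWordSize:

#include <algorithm>
#include <cstddef>

// Sketch of preferred_max_new_size: the young gen is capped both by its
// NewRatio share of the heap and by a per-worker budget
// (CMSYoungGenPerWorker * ParallelGCThreads) that keeps ParNew pauses short.
size_t preferred_max_new_size(size_t max_heap,
                              size_t new_ratio,
                              size_t young_gen_per_worker,
                              size_t parallel_gc_threads,
                              size_t page_size) {
  size_t unaligned = std::min(max_heap / (new_ratio + 1),
                              young_gen_per_worker * parallel_gc_threads);
  return (unaligned + page_size - 1) / page_size * page_size;  // align_up
}

With a 4 GB max heap, NewRatio=2 and 8 workers at 64 MB per worker, the per-worker budget (512 MB) is the binding cap, well below the ~1365 MB NewRatio share.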
--- a/src/hotspot/share/gc/cms/cmsArguments.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSARGUMENTS_HPP
-#define SHARE_GC_CMS_CMSARGUMENTS_HPP
-
-#include "gc/shared/gcArguments.hpp"
-#include "gc/shared/genArguments.hpp"
-
-class CollectedHeap;
-
-class CMSArguments : public GenArguments {
-private:
-  void disable_adaptive_size_policy(const char* collector_name);
-  void set_parnew_gc_flags();
-
-  virtual void initialize();
-  virtual CollectedHeap* create_heap();
-};
-
-#endif // SHARE_GC_CMS_CMSARGUMENTS_HPP
--- a/src/hotspot/share/gc/cms/cmsCardTable.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,470 +0,0 @@
-/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsCardTable.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/shared/cardTableBarrierSet.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/virtualspace.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-
-CMSCardTable::CMSCardTable(MemRegion whole_heap) :
-    CardTableRS(whole_heap, CMSPrecleaningEnabled /* scanned_concurrently */) {
-}
-
-// Returns the number of chunks necessary to cover "mr".
-size_t CMSCardTable::chunks_to_cover(MemRegion mr) {
-  return (size_t)(addr_to_chunk_index(mr.last()) -
-                  addr_to_chunk_index(mr.start()) + 1);
-}
-
-// Returns the index of the chunk in a stride which
-// covers the given address.
-uintptr_t CMSCardTable::addr_to_chunk_index(const void* addr) {
-  uintptr_t card = (uintptr_t) byte_for(addr);
-  return card / ParGCCardsPerStrideChunk;
-}
-
-void CMSCardTable::
-non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
-                                     OopsInGenClosure* cl,
-                                     CardTableRS* ct,
-                                     uint n_threads) {
-  assert(n_threads > 0, "expected n_threads > 0");
-  assert(n_threads <= ParallelGCThreads,
-         "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
-
-  // Make sure the LNC array is valid for the space.
-  CardValue** lowest_non_clean;
-  uintptr_t   lowest_non_clean_base_chunk_index;
-  size_t      lowest_non_clean_chunk_size;
-  get_LNC_array_for_space(sp, lowest_non_clean,
-                          lowest_non_clean_base_chunk_index,
-                          lowest_non_clean_chunk_size);
-
-  uint n_strides = n_threads * ParGCStridesPerThread;
-  SequentialSubTasksDone* pst = sp->par_seq_tasks();
-  // Sets the condition for completion of the subtask (how many threads
-  // need to finish in order to be done).
-  pst->set_n_threads(n_threads);
-  pst->set_n_tasks(n_strides);
-
-  uint stride = 0;
-  while (pst->try_claim_task(/* reference */ stride)) {
-    process_stride(sp, mr, stride, n_strides,
-                   cl, ct,
-                   lowest_non_clean,
-                   lowest_non_clean_base_chunk_index,
-                   lowest_non_clean_chunk_size);
-  }
-  if (pst->all_tasks_completed()) {
-    // Clear lowest_non_clean array for next time.
-    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
-    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
-    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
-      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
-      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
-             "Bounds error");
-      lowest_non_clean[ind] = NULL;
-    }
-  }
-}
-
-void
-CMSCardTable::
-process_stride(Space* sp,
-               MemRegion used,
-               jint stride, int n_strides,
-               OopsInGenClosure* cl,
-               CardTableRS* ct,
-               CardValue** lowest_non_clean,
-               uintptr_t lowest_non_clean_base_chunk_index,
-               size_t    lowest_non_clean_chunk_size) {
-  // We could go from higher to lower addresses here; it wouldn't help
-  // much because of the strided parallelism pattern used here.
-
-  // Find the first card address of the first chunk in the stride that is
-  // at least "bottom" of the used region.
-  CardValue* start_card  = byte_for(used.start());
-  CardValue* end_card    = byte_after(used.last());
-  uintptr_t start_chunk = addr_to_chunk_index(used.start());
-  uintptr_t start_chunk_stride_num = start_chunk % n_strides;
-  CardValue* chunk_card_start;
-
-  if ((uintptr_t)stride >= start_chunk_stride_num) {
-    chunk_card_start = (start_card +
-                        (stride - start_chunk_stride_num) * ParGCCardsPerStrideChunk);
-  } else {
-    // Go ahead to the next chunk group boundary, then to the requested stride.
-    chunk_card_start = (start_card +
-                        (n_strides - start_chunk_stride_num + stride) * ParGCCardsPerStrideChunk);
-  }
-
-  while (chunk_card_start < end_card) {
-    // Even though we go from lower to higher addresses below, the
-    // strided parallelism can interleave the actual processing of the
-    // dirty pages in various ways. For a specific chunk within this
-    // stride, we take care to avoid double scanning or missing a card
-    // by suitably initializing the "min_done" field in process_chunk_boundaries()
-    // below, together with the dirty region extension accomplished in
-    // DirtyCardToOopClosure::do_MemRegion().
-    CardValue* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
-    // Invariant: chunk_mr should be fully contained within the "used" region.
-    MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
-                                   chunk_card_end >= end_card ?
-                                   used.end() : addr_for(chunk_card_end));
-    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
-    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
-
-    // This function is used by the parallel card table iteration.
-    const bool parallel = true;
-
-    DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
-                                                     cl->gen_boundary(),
-                                                     parallel);
-    ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
-
-
-    // Process the chunk.
-    process_chunk_boundaries(sp,
-                             dcto_cl,
-                             chunk_mr,
-                             used,
-                             lowest_non_clean,
-                             lowest_non_clean_base_chunk_index,
-                             lowest_non_clean_chunk_size);
-
-    // We want the LNC array updates above in process_chunk_boundaries
-    // to be visible before any of the card table value changes as a
-    // result of the dirty card iteration below.
-    OrderAccess::storestore();
-
-    // We want to clear the cards: clear_cl here does the work of finding
-    // contiguous dirty ranges of cards to process and clear.
-    clear_cl.do_MemRegion(chunk_mr);
-
-    // Find the next chunk of the stride.
-    chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
-  }
-}
-
-void
-CMSCardTable::
-process_chunk_boundaries(Space* sp,
-                         DirtyCardToOopClosure* dcto_cl,
-                         MemRegion chunk_mr,
-                         MemRegion used,
-                         CardValue** lowest_non_clean,
-                         uintptr_t lowest_non_clean_base_chunk_index,
-                         size_t    lowest_non_clean_chunk_size)
-{
-  // We must worry about non-array objects that cross chunk boundaries,
-  // because such objects are both precisely and imprecisely marked:
-  // .. if the head of such an object is dirty, the entire object
-  //    needs to be scanned, under the interpretation that this
-  //    was an imprecise mark
-  // .. if the head of such an object is not dirty, we can assume
-  //    precise marking and it's efficient to scan just the dirty
-  //    cards.
-  // In either case, each scanned reference must be scanned precisely
-  // once so as to avoid cloning of a young referent. For efficiency,
-  // our closures depend on this property and do not protect against
-  // double scans.
-
-  uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
-  assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
-  uintptr_t cur_chunk_index   = start_chunk_index - lowest_non_clean_base_chunk_index;
-
-  // First, set "our" lowest_non_clean entry, which would be
-  // used by the thread scanning an adjoining left chunk with
-  // a non-array object straddling the mutual boundary.
-  // Find the object that spans our boundary, if one exists.
-  // first_block is the block possibly straddling our left boundary.
-  HeapWord* first_block = sp->block_start(chunk_mr.start());
-  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
-         "First chunk should always have a co-initial block");
-  // Does the block straddle the chunk's left boundary, and is it
-  // a non-array object?
-  if (first_block < chunk_mr.start()        // first block straddles left bdry
-      && sp->block_is_obj(first_block)      // first block is an object
-      && !(oop(first_block)->is_objArray()  // first block is not an array (arrays are precisely dirtied)
-           || oop(first_block)->is_typeArray())) {
-    // Find our least non-clean card, so that a left neighbor
-    // does not scan an object straddling the mutual boundary
-    // too far to the right, and attempt to scan a portion of
-    // that object twice.
-    CardValue* first_dirty_card = NULL;
-    CardValue* last_card_of_first_obj =
-        byte_for(first_block + sp->block_size(first_block) - 1);
-    CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
-    CardValue* last_card_of_cur_chunk = byte_for(chunk_mr.last());
-    CardValue* last_card_to_check = MIN2(last_card_of_cur_chunk, last_card_of_first_obj);
-    // Note that this does not need to go beyond our last card
-    // if our first object completely straddles this chunk.
-    for (CardValue* cur = first_card_of_cur_chunk;
-         cur <= last_card_to_check; cur++) {
-      CardValue val = *cur;
-      if (card_will_be_scanned(val)) {
-        first_dirty_card = cur;
-        break;
-      } else {
-        assert(!card_may_have_been_dirty(val), "Error");
-      }
-    }
-    if (first_dirty_card != NULL) {
-      assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
-      assert(lowest_non_clean[cur_chunk_index] == NULL,
-             "Write exactly once : value should be stable hereafter for this round");
-      lowest_non_clean[cur_chunk_index] = first_dirty_card;
-    }
-  } else {
-    // In this case we can help our neighbor by just asking them
-    // to stop at our first card (even though it may not be dirty).
-    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
-    CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start());
-    lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
-  }
-
-  // Next, set our own max_to_do, which will strictly/exclusively bound
-  // the highest address that we will scan past the right end of our chunk.
-  HeapWord* max_to_do = NULL;
-  if (chunk_mr.end() < used.end()) {
-    // This is not the last chunk in the used region.
-    // What is our last block? We check the first block of
-    // the next (right) chunk rather than strictly checking our last block
-    // because it's potentially more efficient to do so.
-    HeapWord* const last_block = sp->block_start(chunk_mr.end());
-    assert(last_block <= chunk_mr.end(), "In case this property changes.");
-    if ((last_block == chunk_mr.end())     // our last block does not straddle boundary
-        || !sp->block_is_obj(last_block)   // last_block isn't an object
-        || oop(last_block)->is_objArray()  // last_block is an array (precisely marked)
-        || oop(last_block)->is_typeArray()) {
-      max_to_do = chunk_mr.end();
-    } else {
-      assert(last_block < chunk_mr.end(), "Tautology");
-      // It is a non-array object that straddles the right boundary of this chunk.
-      // last_obj_card is the card corresponding to the start of the last object
-      // in the chunk.  Note that the last object may not start in
-      // the chunk.
-      CardValue* const last_obj_card = byte_for(last_block);
-      const CardValue val = *last_obj_card;
-      if (!card_will_be_scanned(val)) {
-        assert(!card_may_have_been_dirty(val), "Error");
-        // The card containing the head is not dirty.  Any marks on
-        // subsequent cards still in this chunk must have been made
-        // precisely; we can cap processing at the end of our chunk.
-        max_to_do = chunk_mr.end();
-      } else {
-        // The last object must be considered dirty, and extends onto the
-        // following chunk.  Look for a dirty card in that chunk that will
-        // bound our processing.
-        CardValue* limit_card = NULL;
-        const size_t last_block_size = sp->block_size(last_block);
-        CardValue* const last_card_of_last_obj =
-          byte_for(last_block + last_block_size - 1);
-        CardValue* const first_card_of_next_chunk = byte_for(chunk_mr.end());
-        // This search potentially goes a long distance looking
-        // for the next card that will be scanned, terminating
-        // at the end of the last_block, if no earlier dirty card
-        // is found.
-        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
-               "last card of next chunk may be wrong");
-        for (CardValue* cur = first_card_of_next_chunk;
-             cur <= last_card_of_last_obj; cur++) {
-          const CardValue val = *cur;
-          if (card_will_be_scanned(val)) {
-            limit_card = cur; break;
-          } else {
-            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
-          }
-        }
-        if (limit_card != NULL) {
-          max_to_do = addr_for(limit_card);
-          assert(limit_card != NULL && max_to_do != NULL, "Error");
-        } else {
-          // The following is a pessimistic value, because it's possible
-          // that a dirty card on a subsequent chunk has been cleared by
-          // the time we get to look at it; we'll correct for that further below,
-          // using the LNC array which records the least non-clean card
-          // before cards were cleared in a particular chunk.
-          limit_card = last_card_of_last_obj;
-          max_to_do = last_block + last_block_size;
-          assert(limit_card != NULL && max_to_do != NULL, "Error");
-        }
-        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
-               "Bounds error.");
-        // It is possible that a dirty card for the last object may have been
-        // cleared before we had a chance to examine it. In that case, the value
-        // will have been logged in the LNC for that chunk.
-        // We need to examine as many chunks to the right as this object
-        // covers. However, we need to bound this checking to the largest
-        // entry in the LNC array: this is because the heap may expand
-        // after the LNC array has been created but before we reach this point,
-        // and the last block in our chunk may have been expanded to include
-        // the expansion delta (and possibly subsequently allocated from, so
-        // it wouldn't be sufficient to check whether that last block was
-        // or was not an object at this point).
-        uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
-                                              - lowest_non_clean_base_chunk_index;
-        const uintptr_t last_chunk_index    = addr_to_chunk_index(used.last())
-                                              - lowest_non_clean_base_chunk_index;
-        if (last_chunk_index_to_check > last_chunk_index) {
-          assert(last_block + last_block_size > used.end(),
-                 "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
-                 " does not exceed used.end() = " PTR_FORMAT ","
-                 " yet last_chunk_index_to_check " INTPTR_FORMAT
-                 " exceeds last_chunk_index " INTPTR_FORMAT,
-                 p2i(last_block), p2i(last_block + last_block_size),
-                 p2i(used.end()),
-                 last_chunk_index_to_check, last_chunk_index);
-          assert(sp->used_region().end() > used.end(),
-                 "Expansion did not happen: "
-                 "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
-                 p2i(sp->used_region().start()), p2i(sp->used_region().end()),
-                 p2i(used.start()), p2i(used.end()));
-          last_chunk_index_to_check = last_chunk_index;
-        }
-        for (uintptr_t lnc_index = cur_chunk_index + 1;
-             lnc_index <= last_chunk_index_to_check;
-             lnc_index++) {
-          CardValue* lnc_card = lowest_non_clean[lnc_index];
-          if (lnc_card != NULL) {
-            // we can stop at the first non-NULL entry we find
-            if (lnc_card <= limit_card) {
-              limit_card = lnc_card;
-              max_to_do = addr_for(limit_card);
-              assert(limit_card != NULL && max_to_do != NULL, "Error");
-            }
-            // In any case, we break now
-            break;
-          }  // else continue to look for a non-NULL entry if any
-        }
-        assert(limit_card != NULL && max_to_do != NULL, "Error");
-      }
-      assert(max_to_do != NULL, "OOPS 1 !");
-    }
-    assert(max_to_do != NULL, "OOPS 2!");
-  } else {
-    max_to_do = used.end();
-  }
-  assert(max_to_do != NULL, "OOPS 3!");
-  // Now we can set the closure we're using so it doesn't go beyond
-  // max_to_do.
-  dcto_cl->set_min_done(max_to_do);
-#ifndef PRODUCT
-  dcto_cl->set_last_bottom(max_to_do);
-#endif
-}
-
-void
-CMSCardTable::
-get_LNC_array_for_space(Space* sp,
-                        CardValue**& lowest_non_clean,
-                        uintptr_t& lowest_non_clean_base_chunk_index,
-                        size_t& lowest_non_clean_chunk_size) {
-
-  int       i        = find_covering_region_containing(sp->bottom());
-  MemRegion covered  = _covered[i];
-  size_t    n_chunks = chunks_to_cover(covered);
-
-  // Only the first thread to obtain the lock will resize the
-  // LNC array for the covered region.  Any later expansion can't affect
-  // the used_at_save_marks region.
-  // (I observed a bug in which the first thread to execute this would
-  // resize, and then it would cause "expand_and_allocate" that would
-  // increase the number of chunks in the covered region.  Then a second
-  // thread would come and execute this, see that the size didn't match,
-  // and free and allocate again.  So the first thread would be using a
-  // freed "_lowest_non_clean" array.)
-
-  // Do a dirty read here. If we pass the conditional then take the rare
-  // event lock and do the read again in case some other thread had already
-  // succeeded and done the resize.
-  int cur_collection = CMSHeap::heap()->total_collections();
-  // Updated _last_LNC_resizing_collection[i] must not be visible before
-  // _lowest_non_clean and friends are visible. Therefore use acquire/release
-  // to guarantee this on non-TSO architectures.
-  if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
-    MutexLocker x(ParGCRareEvent_lock);
-    // This load_acquire is here for clarity only. The MutexLocker already fences.
-    if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
-      if (_lowest_non_clean[i] == NULL ||
-          n_chunks != _lowest_non_clean_chunk_size[i]) {
-
-        // Should we delete the old?
-        if (_lowest_non_clean[i] != NULL) {
-          assert(n_chunks != _lowest_non_clean_chunk_size[i],
-                 "logical consequence");
-          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
-          _lowest_non_clean[i] = NULL;
-        }
-        // Now allocate a new one if necessary.
-        if (_lowest_non_clean[i] == NULL) {
-          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
-          _lowest_non_clean_chunk_size[i]       = n_chunks;
-          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
-          for (int j = 0; j < (int)n_chunks; j++)
-            _lowest_non_clean[i][j] = NULL;
-        }
-      }
-      // Make sure this gets visible only after _lowest_non_clean* was initialized
-      OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
-    }
-  }
-  // In any case, now do the initialization.
-  lowest_non_clean                  = _lowest_non_clean[i];
-  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
-  lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
-}
-
-#ifdef ASSERT
-void CMSCardTable::verify_used_region_at_save_marks(Space* sp) const {
-  MemRegion ur    = sp->used_region();
-  MemRegion urasm = sp->used_region_at_save_marks();
-
-  if (!ur.contains(urasm)) {
-    log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? "
-                    "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
-                    "[" PTR_FORMAT ", " PTR_FORMAT ")",
-                    p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end()));
-    MemRegion ur2 = sp->used_region();
-    MemRegion urasm2 = sp->used_region_at_save_marks();
-    if (!ur.equals(ur2)) {
-      log_warning(gc)("CMS+ParNew: Flickering used_region()!!");
-    }
-    if (!urasm.equals(urasm2)) {
-      log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!");
-    }
-    ShouldNotReachHere();
-  }
-}
-#endif // ASSERT
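The stride arithmetic in process_stride() partitions the card table round-robin by chunk. A minimal model, assuming card indices instead of CardValue* pointers and a chunk-aligned start card (both simplifications of the code above):

#include <cstddef>

// Chunk i of the card table belongs to stride (i % n_strides); a worker
// that claims stride s scans every n_strides-th chunk, starting from the
// first chunk of the used region that falls on stride s.
struct StrideModel {
  size_t cards_per_chunk;  // ParGCCardsPerStrideChunk
  size_t n_strides;        // n_threads * ParGCStridesPerThread

  size_t chunk_index(size_t card) const { return card / cards_per_chunk; }

  // First card at or after start_card whose chunk belongs to `stride`.
  size_t first_card_of_stride(size_t start_card, size_t stride) const {
    size_t start_stride = chunk_index(start_card) % n_strides;
    size_t chunks_ahead = (stride >= start_stride)
                          ? stride - start_stride
                          : n_strides - start_stride + stride;
    return start_card + chunks_ahead * cards_per_chunk;
  }

  // Advance a worker from one of its chunks to its next chunk.
  size_t next_chunk(size_t chunk_card) const {
    return chunk_card + cards_per_chunk * n_strides;
  }
};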
--- a/src/hotspot/share/gc/cms/cmsCardTable.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSCARDTABLE_HPP
-#define SHARE_GC_CMS_CMSCARDTABLE_HPP
-
-#include "gc/shared/cardTableRS.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-class DirtyCardToOopClosure;
-class MemRegion;
-class OopsInGenClosure;
-class Space;
-
-class CMSCardTable : public CardTableRS {
-private:
-  // Returns the number of chunks necessary to cover "mr".
-  size_t chunks_to_cover(MemRegion mr);
-
-  // Returns the index of the chunk in a stride which
-  // covers the given address.
-  uintptr_t addr_to_chunk_index(const void* addr);
-
-  // Initializes "lowest_non_clean" to point to the array for the region
-  // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
-  // index of the corresponding to the first element of that array.
-  // Ensures that these arrays are of sufficient size, allocating if necessary.
-  // May be called by several threads concurrently.
-  void get_LNC_array_for_space(Space* sp,
-                               CardValue**& lowest_non_clean,
-                               uintptr_t& lowest_non_clean_base_chunk_index,
-                               size_t& lowest_non_clean_chunk_size);
-
-  // Apply cl, which must either itself apply dcto_cl or be dcto_cl,
-  // to the cards in the stride (of n_strides) within the given space.
-  void process_stride(Space* sp,
-                      MemRegion used,
-                      jint stride, int n_strides,
-                      OopsInGenClosure* cl,
-                      CardTableRS* ct,
-                      CardValue** lowest_non_clean,
-                      uintptr_t lowest_non_clean_base_chunk_index,
-                      size_t lowest_non_clean_chunk_size);
-
-  // Makes sure that chunk boundaries are handled appropriately, by
-  // adjusting the min_done of dcto_cl, and by using a special card-table
-  // value to indicate how min_done should be set.
-  void process_chunk_boundaries(Space* sp,
-                                DirtyCardToOopClosure* dcto_cl,
-                                MemRegion chunk_mr,
-                                MemRegion used,
-                                CardValue** lowest_non_clean,
-                                uintptr_t lowest_non_clean_base_chunk_index,
-                                size_t    lowest_non_clean_chunk_size);
-
-  virtual void verify_used_region_at_save_marks(Space* sp) const NOT_DEBUG_RETURN;
-
-protected:
-  // Work method used to implement non_clean_card_iterate_possibly_parallel()
-  // above in the parallel case.
-  virtual void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
-                                                    OopsInGenClosure* cl, CardTableRS* ct,
-                                                    uint n_threads);
-
-public:
-  CMSCardTable(MemRegion whole_heap);
-};
-
-#endif // SHARE_GC_CMS_CMSCARDTABLE_HPP
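get_LNC_array_for_space() (declared above, defined in cmsCardTable.cpp) resizes the lowest_non_clean array at most once per collection via a dirty read, a lock, and a re-check, with release/acquire ordering the array's publication. A minimal sketch of that pattern, using std::atomic in place of OrderAccess and hypothetical standalone types:

#include <atomic>
#include <mutex>
#include <vector>

// Model of the lazily (re)sized per-region array: a racy read of a
// collection stamp, then a lock, then a re-check under the lock.
struct LazyArray {
  std::atomic<int> last_resize_collection{-1};
  std::mutex rare_event_lock;
  std::vector<const void*> lowest_non_clean;

  std::vector<const void*>& get(int cur_collection, size_t n_chunks) {
    // Dirty read; acquire pairs with the release store below.
    if (last_resize_collection.load(std::memory_order_acquire) != cur_collection) {
      std::lock_guard<std::mutex> guard(rare_event_lock);
      // Re-check: another thread may have resized while we waited.
      if (last_resize_collection.load(std::memory_order_acquire) != cur_collection) {
        if (lowest_non_clean.size() != n_chunks) {
          lowest_non_clean.assign(n_chunks, nullptr);  // resize and clear
        }
        // Publish the stamp only after the array is fully initialized.
        last_resize_collection.store(cur_collection, std::memory_order_release);
      }
    }
    return lowest_non_clean;
  }
};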
--- a/src/hotspot/share/gc/cms/cmsGCStats.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsGCStats.hpp"
-#include "gc/shared/gcUtil.inline.hpp"
-#include "runtime/globals.hpp"
-
-CMSGCStats::CMSGCStats() {
-    _avg_promoted       = new AdaptivePaddedNoZeroDevAverage(
-                                                  CMSExpAvgFactor,
-                                                  PromotedPadding);
-}
--- a/src/hotspot/share/gc/cms/cmsGCStats.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSGCSTATS_HPP
-#define SHARE_GC_CMS_CMSGCSTATS_HPP
-
-#include "gc/shared/gcStats.hpp"
-
-class CMSGCStats : public GCStats {
- public:
-  CMSGCStats();
-
-  virtual Name kind() {
-    return CMSGCStatsKind;
-  }
-};
-
-#endif // SHARE_GC_CMS_CMSGCSTATS_HPP
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,263 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsCardTable.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genMemoryPools.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/memoryManager.hpp"
-#include "utilities/stack.inline.hpp"
-
-class CompactibleFreeListSpacePool : public CollectedMemoryPool {
-private:
-  CompactibleFreeListSpace* _space;
-public:
-  CompactibleFreeListSpacePool(CompactibleFreeListSpace* space,
-                               const char* name,
-                               size_t max_size,
-                               bool support_usage_threshold) :
-    CollectedMemoryPool(name, space->capacity(), max_size, support_usage_threshold),
-    _space(space) {
-  }
-
-  MemoryUsage get_memory_usage() {
-    size_t max_heap_size   = (available_for_allocation() ? max_size() : 0);
-    size_t used      = used_in_bytes();
-    size_t committed = _space->capacity();
-
-    return MemoryUsage(initial_size(), used, committed, max_heap_size);
-  }
-
-  size_t used_in_bytes() {
-    return _space->used_stable();
-  }
-};
-
-CMSHeap::CMSHeap() :
-    GenCollectedHeap(Generation::ParNew,
-                     Generation::ConcurrentMarkSweep,
-                     "ParNew:CMS"),
-    _workers(NULL),
-    _eden_pool(NULL),
-    _survivor_pool(NULL),
-    _old_pool(NULL) {
-}
-
-jint CMSHeap::initialize() {
-  jint status = GenCollectedHeap::initialize();
-  if (status != JNI_OK) return status;
-
-  _workers = new WorkGang("GC Thread", ParallelGCThreads,
-                          /* are_GC_task_threads */true,
-                          /* are_ConcurrentGC_threads */false);
-  if (_workers == NULL) {
-    return JNI_ENOMEM;
-  }
-  _workers->initialize_workers();
-
-  // If we are running CMS, create the collector responsible
-  // for collecting the CMS generations.
-  if (!create_cms_collector()) {
-    return JNI_ENOMEM;
-  }
-
-  return JNI_OK;
-}
-
-CardTableRS* CMSHeap::create_rem_set(const MemRegion& reserved_region) {
-  return new CMSCardTable(reserved_region);
-}
-
-void CMSHeap::initialize_serviceability() {
-  _young_manager = new GCMemoryManager("ParNew", "end of minor GC");
-  _old_manager = new GCMemoryManager("ConcurrentMarkSweep", "end of major GC");
-
-  ParNewGeneration* young = young_gen();
-  _eden_pool = new ContiguousSpacePool(young->eden(),
-                                       "Par Eden Space",
-                                       young->max_eden_size(),
-                                       false);
-
-  _survivor_pool = new SurvivorContiguousSpacePool(young,
-                                                   "Par Survivor Space",
-                                                   young->max_survivor_size(),
-                                                   false);
-
-  ConcurrentMarkSweepGeneration* old = (ConcurrentMarkSweepGeneration*) old_gen();
-  _old_pool = new CompactibleFreeListSpacePool(old->cmsSpace(),
-                                               "CMS Old Gen",
-                                               old->reserved().byte_size(),
-                                               true);
-
-  _young_manager->add_pool(_eden_pool);
-  _young_manager->add_pool(_survivor_pool);
-  young->set_gc_manager(_young_manager);
-
-  _old_manager->add_pool(_eden_pool);
-  _old_manager->add_pool(_survivor_pool);
-  _old_manager->add_pool(_old_pool);
-  old->set_gc_manager(_old_manager);
-
-}
-
-CMSHeap* CMSHeap::heap() {
-  CollectedHeap* heap = Universe::heap();
-  assert(heap != NULL, "Uninitialized access to CMSHeap::heap()");
-  assert(heap->kind() == CollectedHeap::CMS, "Invalid heap kind");
-  return static_cast<CMSHeap*>(heap);
-}
-
-void CMSHeap::gc_threads_do(ThreadClosure* tc) const {
-  assert(workers() != NULL, "should have workers here");
-  workers()->threads_do(tc);
-  ConcurrentMarkSweepThread::threads_do(tc);
-}
-
-void CMSHeap::print_gc_threads_on(outputStream* st) const {
-  assert(workers() != NULL, "should have workers here");
-  workers()->print_worker_threads_on(st);
-  ConcurrentMarkSweepThread::print_all_on(st);
-}
-
-void CMSHeap::print_on_error(outputStream* st) const {
-  GenCollectedHeap::print_on_error(st);
-  st->cr();
-  CMSCollector::print_on_error(st);
-}
-
-bool CMSHeap::create_cms_collector() {
-  assert(old_gen()->kind() == Generation::ConcurrentMarkSweep,
-         "Unexpected generation kinds");
-  CMSCollector* collector =
-    new CMSCollector((ConcurrentMarkSweepGeneration*) old_gen(), rem_set());
-
-  if (collector == NULL || !collector->completed_initialization()) {
-    if (collector) {
-      delete collector; // Be nice in embedded situation
-    }
-    vm_shutdown_during_initialization("Could not create CMS collector");
-    return false;
-  }
-  return true; // success
-}
-
-void CMSHeap::collect(GCCause::Cause cause) {
-  if (should_do_concurrent_full_gc(cause)) {
-    // Mostly concurrent full collection.
-    collect_mostly_concurrent(cause);
-  } else {
-    GenCollectedHeap::collect(cause);
-  }
-}
-
-bool CMSHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
-  switch (cause) {
-    case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
-    case GCCause::_java_lang_system_gc:
-    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
-    default:                            return false;
-  }
-}
-
-void CMSHeap::collect_mostly_concurrent(GCCause::Cause cause) {
-  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");
-
-  MutexLocker ml(Heap_lock);
-  // Read the GC counts while holding the Heap_lock
-  unsigned int full_gc_count_before = total_full_collections();
-  unsigned int gc_count_before      = total_collections();
-  {
-    MutexUnlocker mu(Heap_lock);
-    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
-    VMThread::execute(&op);
-  }
-}
-
-void CMSHeap::stop() {
-  ConcurrentMarkSweepThread::cmst()->stop();
-}
-
-void CMSHeap::safepoint_synchronize_begin() {
-  ConcurrentMarkSweepThread::synchronize(false);
-}
-
-void CMSHeap::safepoint_synchronize_end() {
-  ConcurrentMarkSweepThread::desynchronize(false);
-}
-
-void CMSHeap::cms_process_roots(StrongRootsScope* scope,
-                                bool young_gen_as_roots,
-                                ScanningOption so,
-                                bool only_strong_roots,
-                                OopsInGenClosure* root_closure,
-                                CLDClosure* cld_closure) {
-  MarkingCodeBlobClosure mark_code_closure(root_closure, !CodeBlobToOopClosure::FixRelocations);
-  CLDClosure* weak_cld_closure = only_strong_roots ? NULL : cld_closure;
-
-  process_roots(scope, so, root_closure, cld_closure, weak_cld_closure, &mark_code_closure);
-
-  if (young_gen_as_roots &&
-      _process_strong_tasks->try_claim_task(GCH_PS_younger_gens)) {
-    root_closure->set_generation(young_gen());
-    young_gen()->oop_iterate(root_closure);
-    root_closure->reset_generation();
-  }
-
-  _process_strong_tasks->all_tasks_completed(scope->n_threads());
-}
-
-void CMSHeap::gc_prologue(bool full) {
-  GenCollectedHeap::gc_prologue(full);
-}
-
-void CMSHeap::gc_epilogue(bool full) {
-  GenCollectedHeap::gc_epilogue(full);
-}
-
-GrowableArray<GCMemoryManager*> CMSHeap::memory_managers() {
-  GrowableArray<GCMemoryManager*> memory_managers(2);
-  memory_managers.append(_young_manager);
-  memory_managers.append(_old_manager);
-  return memory_managers;
-}
-
-GrowableArray<MemoryPool*> CMSHeap::memory_pools() {
-  GrowableArray<MemoryPool*> memory_pools(3);
-  memory_pools.append(_eden_pool);
-  memory_pools.append(_survivor_pool);
-  memory_pools.append(_old_pool);
-  return memory_pools;
-}
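collect_mostly_concurrent() above samples the collection counters under Heap_lock, then releases the lock before dispatching the VM operation; the sampled counters let the operation detect whether another collection already satisfied the request. A minimal sketch of that idiom with hypothetical stand-in types (no real VM_GenCollectFullConcurrent semantics):

#include <mutex>

struct GcCounters { unsigned total, full; };

class HeapModel {
  std::mutex heap_lock_;
  GcCounters counters_{0, 0};

  // Stand-in for VMThread::execute(&op): the operation re-reads the
  // counters and becomes a no-op if a GC already ran in between.
  void execute_concurrent_full_gc(GcCounters before) {
    std::lock_guard<std::mutex> g(heap_lock_);
    if (counters_.total != before.total || counters_.full != before.full) {
      return;  // another collection already happened; nothing to do
    }
    ++counters_.total;  // model "performing" the collection
    ++counters_.full;
  }

public:
  void collect_mostly_concurrent() {
    GcCounters before;
    {
      std::lock_guard<std::mutex> g(heap_lock_);  // read counts under the lock
      before = counters_;
    }  // lock released before executing the VM operation
    execute_concurrent_full_gc(before);
  }
};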
--- a/src/hotspot/share/gc/cms/cmsHeap.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,140 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSHEAP_HPP
-#define SHARE_GC_CMS_CMSHEAP_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/oopStorageParState.hpp"
-#include "utilities/growableArray.hpp"
-
-class CLDClosure;
-class GCMemoryManager;
-class MemoryPool;
-class OopsInGenClosure;
-class outputStream;
-class StrongRootsScope;
-class ThreadClosure;
-class WorkGang;
-
-class CMSHeap : public GenCollectedHeap {
-public:
-  CMSHeap();
-
-  // Returns JNI_OK on success
-  virtual jint initialize();
-  virtual CardTableRS* create_rem_set(const MemRegion& reserved_region);
-
-  // Convenience function to be used in situations where the heap type can be
-  // asserted to be this type.
-  static CMSHeap* heap();
-
-  virtual Name kind() const {
-    return CollectedHeap::CMS;
-  }
-
-  virtual const char* name() const {
-    return "Concurrent Mark Sweep";
-  }
-
-  WorkGang* workers() const { return _workers; }
-
-  virtual void print_gc_threads_on(outputStream* st) const;
-  virtual void gc_threads_do(ThreadClosure* tc) const;
-  virtual void print_on_error(outputStream* st) const;
-
-  // Perform a full collection of the heap; intended for use in implementing
-  // "System.gc". This implies as full a collection as the CollectedHeap
-  // supports. Caller does not hold the Heap_lock on entry.
-  void collect(GCCause::Cause cause);
-
-  void stop();
-  void safepoint_synchronize_begin();
-  void safepoint_synchronize_end();
-
-  virtual GrowableArray<GCMemoryManager*> memory_managers();
-  virtual GrowableArray<MemoryPool*> memory_pools();
-
-  // If "young_gen_as_roots" is false, younger generations are
-  // not scanned as roots; in this case, the caller must be arranging to
-  // scan the younger generations itself.  (For example, a generation might
-  // explicitly mark reachable objects in younger generations, to avoid
-  // excess storage retention.)
-  void cms_process_roots(StrongRootsScope* scope,
-                         bool young_gen_as_roots,
-                         ScanningOption so,
-                         bool only_strong_roots,
-                         OopsInGenClosure* root_closure,
-                         CLDClosure* cld_closure);
-
-  GCMemoryManager* old_manager() const { return _old_manager; }
-
-  ParNewGeneration* young_gen() const {
-    assert(_young_gen->kind() == Generation::ParNew, "Wrong generation type");
-    return static_cast<ParNewGeneration*>(_young_gen);
-  }
-
-  ConcurrentMarkSweepGeneration* old_gen() const {
-    assert(_old_gen->kind() == Generation::ConcurrentMarkSweep, "Wrong generation kind");
-    return static_cast<ConcurrentMarkSweepGeneration*>(_old_gen);
-  }
-
-  // Apply "cur->do_oop" or "older->do_oop" to all the oops in objects
-  // allocated since the last call to save_marks in the young generation.
-  // The "cur" closure is applied to references in the younger generation
-  // at "level", and the "older" closure to older generations.
-  template <typename OopClosureType1, typename OopClosureType2>
-  void oop_since_save_marks_iterate(OopClosureType1* cur,
-                                    OopClosureType2* older);
-
-private:
-  WorkGang* _workers;
-  MemoryPool* _eden_pool;
-  MemoryPool* _survivor_pool;
-  MemoryPool* _old_pool;
-
-  virtual void gc_prologue(bool full);
-  virtual void gc_epilogue(bool full);
-
-  virtual void initialize_serviceability();
-
-  // Accessor for memory state verification support
-  NOT_PRODUCT(
-    virtual size_t skip_header_HeapWords() { return CMSCollector::skip_header_HeapWords(); }
-  )
-
-  // Returns success or failure.
-  bool create_cms_collector();
-
-  // In support of ExplicitGCInvokesConcurrent functionality
-  bool should_do_concurrent_full_gc(GCCause::Cause cause);
-
-  void collect_mostly_concurrent(GCCause::Cause cause);
-};
-
-#endif // SHARE_GC_CMS_CMSHEAP_HPP
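
The young_gen()/old_gen() accessors in the removed header above pair an assert on the generation kind with a static_cast, so debug builds verify the dynamic type before the unchecked downcast is trusted. A standalone sketch of that checked-downcast pattern with mock generation types (not HotSpot's):

    #include <cassert>

    struct Generation {
      enum Kind { ParNew, ConcurrentMarkSweep };
      virtual Kind kind() const = 0;
      virtual ~Generation() {}
    };

    struct ParNewGeneration : Generation {
      Kind kind() const { return ParNew; }
    };

    // Debug builds trip the assert on a kind mismatch; release builds
    // pay only for the (free) static_cast.
    ParNewGeneration* as_par_new(Generation* gen) {
      assert(gen->kind() == Generation::ParNew && "Wrong generation kind");
      return static_cast<ParNewGeneration*>(gen);
    }
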
--- a/src/hotspot/share/gc/cms/cmsHeap.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSHEAP_INLINE_HPP
-#define SHARE_GC_CMS_CMSHEAP_INLINE_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/serial/defNewGeneration.inline.hpp"
-
-template <typename OopClosureType1, typename OopClosureType2>
-void CMSHeap::oop_since_save_marks_iterate(OopClosureType1* cur,
-                                           OopClosureType2* older) {
-  young_gen()->oop_since_save_marks_iterate(cur);
-  old_gen()->oop_since_save_marks_iterate(older);
-}
-
-#endif // SHARE_GC_CMS_CMSHEAP_INLINE_HPP
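
The inline template above exists so the per-oop closure calls are statically bound: one heap-level call fans out to the young and old generations with concrete closure types instead of virtual dispatch on the hot path. A standalone sketch of that delegation, with mock generation and oop types (not HotSpot's):

    #include <cstddef>

    struct MockOop { int value; };

    struct MockGeneration {
      MockOop recent[2];  // objects "allocated since the last save_marks"
      template <typename Closure>
      void oop_since_save_marks_iterate(Closure* cl) {
        for (std::size_t i = 0; i < 2; i++) {
          cl->do_oop(&recent[i]);   // statically bound, inlinable
        }
      }
    };

    // Shape of the removed CMSHeap method: one call, two generations,
    // two concrete closure types.
    template <typename OopClosureType1, typename OopClosureType2>
    void oop_since_save_marks_iterate(MockGeneration* young,
                                      MockGeneration* old_gen,
                                      OopClosureType1* cur,
                                      OopClosureType2* older) {
      young->oop_since_save_marks_iterate(cur);
      old_gen->oop_since_save_marks_iterate(older);
    }
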
--- a/src/hotspot/share/gc/cms/cmsLockVerifier.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "memory/universe.hpp"
-#include "runtime/vmThread.hpp"
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except that it relaxes the
-// assertion somewhat for the parallel GC case, where VM thread
-// or the CMS thread might hold the lock on behalf of the parallel
-// threads. The second argument is in support of an extra locking
-// check for CFL spaces' free list locks.
-#ifndef PRODUCT
-void CMSLockVerifier::assert_locked(const Mutex* lock,
-                                    const Mutex* p_lock1,
-                                    const Mutex* p_lock2) {
-  if (!Universe::is_fully_initialized()) {
-    return;
-  }
-
-  Thread* myThread = Thread::current();
-
-  if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
-    assert(p_lock1 == NULL && p_lock2 == NULL, "Unexpected caller error");
-    if (myThread->is_ConcurrentGC_thread()) {
-      // This test might have to change in the future, if there can be
-      // multiple peer CMS threads. For now it must be the single CMS thread:
-      assert(myThread == ConcurrentMarkSweepThread::cmst(),
-             "In CMS, CMS thread is the only Conc GC thread.");
-      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-             "CMS thread should have CMS token");
-    } else if (myThread->is_VM_thread()) {
-      assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-             "VM thread should have CMS token");
-    } else {
-      // The token should be held on our behalf by either the CMS
-      // or the VM thread; there is not enough easily testable
-      // state info to test which one here.
-      assert(myThread->is_GC_task_thread(), "Unexpected thread type");
-    }
-    return;
-  }
-
-  if (myThread->is_VM_thread()
-      || myThread->is_ConcurrentGC_thread()
-      || myThread->is_Java_thread()) {
-    // Make sure that we are holding the associated lock.
-    assert_lock_strong(lock);
-    // The checking of p_lock is a special case for CFLS' free list
-    // locks: we make sure that none of the parallel GC work gang
-    // threads are holding "sub-locks" of freeListLock(). We check only
-    // the parDictionaryAllocLock because the others are too numerous.
-    // This special-case code is somewhat ugly and any improvements
-    // are welcome.
-    assert(p_lock1 == NULL || !p_lock1->is_locked() || p_lock1->owned_by_self(),
-           "Possible race between this and parallel GC threads");
-    assert(p_lock2 == NULL || !p_lock2->is_locked() || p_lock2->owned_by_self(),
-           "Possible race between this and parallel GC threads");
-  } else if (myThread->is_GC_task_thread()) {
-    // Make sure that the VM or CMS thread holds lock on our behalf
-    // XXX If there were a concept of a gang_master for a (set of)
-    // gang_workers, we could have used the identity of that thread
-    // for checking ownership here; for now we just check the disjunction.
-    assert(lock->owner() == VMThread::vm_thread() ||
-           lock->owner() == ConcurrentMarkSweepThread::cmst(),
-           "Should be locked by VM thread or CMS thread on my behalf");
-    if (p_lock1 != NULL) {
-      assert_lock_strong(p_lock1);
-    }
-    if (p_lock2 != NULL) {
-      assert_lock_strong(p_lock2);
-    }
-  } else {
-    // Make sure we didn't miss some other thread type calling into here;
-    // perhaps as a result of future VM evolution.
-    ShouldNotReachHere();
-  }
-}
-#endif
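
The verifier above relaxes the usual "caller must own the lock" rule: a worker also passes if the VM thread or the CMS thread holds the lock on its behalf. A standalone sketch of that relaxed ownership check, with a mock mutex that records its owner explicitly (HotSpot's Mutex tracks owners itself; mock types, illustration only):

    #include <cassert>
    #include <thread>

    struct MockMutex {
      std::thread::id owner;   // id of the thread currently holding the lock
      bool owned_by(std::thread::id t) const { return owner == t; }
    };

    // Strong assertion, relaxed: self-ownership, or ownership by one of
    // the designated proxy threads, satisfies the check.
    void assert_locked_relaxed(const MockMutex* lock,
                               std::thread::id vm_thread,
                               std::thread::id cms_thread) {
      std::thread::id self = std::this_thread::get_id();
      assert(lock->owned_by(self) ||
             lock->owned_by(vm_thread) ||
             lock->owned_by(cms_thread));
    }
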
--- a/src/hotspot/share/gc/cms/cmsLockVerifier.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
-#define SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
-
-#include "runtime/mutex.hpp"
-
-///////////// Locking verification specific to CMS //////////////
-// Much like "assert_lock_strong()", except
-// that it relaxes the assertion somewhat for the parallel GC case, where
-// main GC thread or the CMS thread might hold the lock on behalf of
-// the parallel threads.
-class CMSLockVerifier: AllStatic {
- public:
-  static void assert_locked(const Mutex* lock, const Mutex* p_lock1, const Mutex* p_lock2)
-    PRODUCT_RETURN;
-  static void assert_locked(const Mutex* lock, const Mutex* p_lock) {
-    assert_locked(lock, p_lock, NULL);
-  }
-  static void assert_locked(const Mutex* lock) {
-    assert_locked(lock, NULL);
-  }
-};
-
-#endif // SHARE_GC_CMS_CMSLOCKVERIFIER_HPP
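
assert_locked() above compiles away in product builds via the PRODUCT_RETURN idiom: the macro expands to an empty inline body in product builds and to nothing (leaving a plain declaration) in debug builds, whose definition lives in the .cpp file. A simplified standalone reconstruction of that idiom (the real macro lives in HotSpot's utilities/macros.hpp; this is a sketch, not the exact definition):

    #ifdef PRODUCT
    #define PRODUCT_RETURN {}   // product: empty inline body, call vanishes
    #else
    #define PRODUCT_RETURN      // debug: plain declaration, defined in .cpp
    #endif

    struct Mutex;

    struct LockVerifier {
      static void assert_locked(const Mutex* lock) PRODUCT_RETURN;
    };

    #ifndef PRODUCT
    void LockVerifier::assert_locked(const Mutex* lock) {
      // real ownership checks would go here in a debug build
    }
    #endif
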
--- a/src/hotspot/share/gc/cms/cmsOopClosures.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,333 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSOOPCLOSURES_HPP
-#define SHARE_GC_CMS_CMSOOPCLOSURES_HPP
-
-#include "gc/shared/genOopClosures.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "memory/iterator.hpp"
-
-/////////////////////////////////////////////////////////////////
-// Closures used by ConcurrentMarkSweepGeneration's collector
-/////////////////////////////////////////////////////////////////
-class ConcurrentMarkSweepGeneration;
-class CMSBitMap;
-class CMSMarkStack;
-class CMSCollector;
-class MarkFromRootsClosure;
-class ParMarkFromRootsClosure;
-
-class Mutex;
-
-// Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_DEFN                             \
-  void do_oop(oop obj);                              \
-  template <class T> inline void do_oop_work(T* p);
-
-// TODO: This duplication of the MetadataVisitingOopIterateClosure class is only needed
-//       because some CMS OopClosures derive from OopsInGenClosure. It would be
-//       good to get rid of them completely.
-class MetadataVisitingOopsInGenClosure: public OopsInGenClosure {
- public:
-  virtual bool do_metadata() { return true; }
-  virtual void do_klass(Klass* k);
-  virtual void do_cld(ClassLoaderData* cld);
-};
-
-class MarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  const MemRegion _span;
-  CMSBitMap*      _bitMap;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  MarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class ParMarkRefsIntoClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  const MemRegion _span;
-  CMSBitMap*      _bitMap;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParMarkRefsIntoClosure(MemRegion span, CMSBitMap* bitMap);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// A variant of the above used in certain kinds of CMS
-// marking verification.
-class MarkRefsIntoVerifyClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  const MemRegion _span;
-  CMSBitMap*      _verification_bm;
-  CMSBitMap*      _cms_bm;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  MarkRefsIntoVerifyClosure(MemRegion span, CMSBitMap* verification_bm,
-                            CMSBitMap* cms_bm);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// The non-parallel version (the parallel version appears further below).
-class PushAndMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  CMSBitMap*    _mod_union_table;
-  CMSMarkStack* _mark_stack;
-  bool          _concurrent_precleaning;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  PushAndMarkClosure(CMSCollector* collector,
-                     MemRegion span,
-                     ReferenceDiscoverer* rd,
-                     CMSBitMap* bit_map,
-                     CMSBitMap* mod_union_table,
-                     CMSMarkStack* mark_stack,
-                     bool concurrent_precleaning);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// In the parallel case, the bit map and the
-// reference processor are currently shared. Access to
-// these shared mutable structures must use appropriate
-// synchronization (for instance, via CAS). The marking stack
-// used in the non-parallel case above is here replaced with
-// an OopTaskQueue structure to allow efficient work stealing.
-class ParPushAndMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  OopTaskQueue* _work_queue;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParPushAndMarkClosure(CMSCollector* collector,
-                        MemRegion span,
-                        ReferenceDiscoverer* rd,
-                        CMSBitMap* bit_map,
-                        OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// The non-parallel version (the parallel version appears further below).
-class MarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  MemRegion          _span;
-  CMSBitMap*         _bit_map;
-  CMSMarkStack*      _mark_stack;
-  PushAndMarkClosure _pushAndMarkClosure;
-  CMSCollector*      _collector;
-  Mutex*             _freelistLock;
-  bool               _yield;
-  // Whether the closure is being used for concurrent precleaning
-  bool               _concurrent_precleaning;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  MarkRefsIntoAndScanClosure(MemRegion span,
-                             ReferenceDiscoverer* rd,
-                             CMSBitMap* bit_map,
-                             CMSBitMap* mod_union_table,
-                             CMSMarkStack* mark_stack,
-                             CMSCollector* collector,
-                             bool should_yield,
-                             bool concurrent_precleaning);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  void set_freelistLock(Mutex* m) {
-    _freelistLock = m;
-  }
-
- private:
-  inline void do_yield_check();
-  void do_yield_work();
-  bool take_from_overflow_list();
-};
-
-// In this, the parallel avatar of MarkRefsIntoAndScanClosure, the revisit
-// stack and the bitMap are shared, so access needs to be suitably
-// synchronized. An OopTaskQueue structure, supporting efficient
-// work stealing, replaces a CMSMarkStack for storing grey objects.
-class ParMarkRefsIntoAndScanClosure: public MetadataVisitingOopsInGenClosure {
- private:
-  MemRegion             _span;
-  CMSBitMap*            _bit_map;
-  OopTaskQueue*         _work_queue;
-  const uint            _low_water_mark;
-  ParPushAndMarkClosure _parPushAndMarkClosure;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParMarkRefsIntoAndScanClosure(CMSCollector* collector,
-                                MemRegion span,
-                                ReferenceDiscoverer* rd,
-                                CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  void trim_queue(uint size);
-};
-
-// This closure is used during the concurrent marking phase
-// following the first checkpoint. Its use is buried in
-// the closure MarkFromRootsClosure.
-class PushOrMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector*   _collector;
-  MemRegion       _span;
-  CMSBitMap*      _bitMap;
-  CMSMarkStack*   _markStack;
-  HeapWord* const _finger;
-  MarkFromRootsClosure* const
-                  _parent;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  PushOrMarkClosure(CMSCollector* cms_collector,
-                    MemRegion span,
-                    CMSBitMap* bitMap,
-                    CMSMarkStack* markStack,
-                    HeapWord* finger,
-                    MarkFromRootsClosure* parent);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  // Deal with a stack overflow condition
-  void handle_stack_overflow(HeapWord* lost);
- private:
-  inline void do_yield_check();
-};
-
-// A parallel (MT) version of the above.
-// This closure is used during the concurrent marking phase
-// following the first checkpoint. Its use is buried in
-// the closure ParMarkFromRootsClosure.
-class ParPushOrMarkClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector*                  _collector;
-  MemRegion                      _whole_span;
-  MemRegion                      _span;       // local chunk
-  CMSBitMap*                     _bit_map;
-  OopTaskQueue*                  _work_queue;
-  CMSMarkStack*                  _overflow_stack;
-  HeapWord*  const               _finger;
-  HeapWord* volatile* const      _global_finger_addr;
-  ParMarkFromRootsClosure* const _parent;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParPushOrMarkClosure(CMSCollector* cms_collector,
-                       MemRegion span,
-                       CMSBitMap* bit_map,
-                       OopTaskQueue* work_queue,
-                       CMSMarkStack* mark_stack,
-                       HeapWord* finger,
-                       HeapWord* volatile* global_finger_addr,
-                       ParMarkFromRootsClosure* parent);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  // Deal with a stack overflow condition
-  void handle_stack_overflow(HeapWord* lost);
- private:
-  inline void do_yield_check();
-};
-
-// For objects in CMS generation, this closure marks
-// given objects (transitively) as being reachable/live.
-// This is currently used during the (weak) reference object
-// processing phase of the CMS final checkpoint step, as
-// well as during the concurrent precleaning of the discovered
-// reference lists.
-class CMSKeepAliveClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  const MemRegion _span;
-  CMSMarkStack* _mark_stack;
-  CMSBitMap*    _bit_map;
-  bool          _concurrent_precleaning;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  CMSKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc);
-  bool    concurrent_precleaning() const { return _concurrent_precleaning; }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class CMSInnerParMarkAndPushClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  MemRegion     _span;
-  OopTaskQueue* _work_queue;
-  CMSBitMap*    _bit_map;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  CMSInnerParMarkAndPushClosure(CMSCollector* collector,
-                                MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// A parallel (MT) version of the above, used when
-// reference processing is parallel; the only difference
-// is in the do_oop method.
-class CMSParKeepAliveClosure: public MetadataVisitingOopIterateClosure {
- private:
-  MemRegion     _span;
-  OopTaskQueue* _work_queue;
-  CMSBitMap*    _bit_map;
-  CMSInnerParMarkAndPushClosure
-                _mark_and_push;
-  const uint    _low_water_mark;
-  void trim_queue(uint max);
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  CMSParKeepAliveClosure(CMSCollector* collector, MemRegion span,
-                         CMSBitMap* bit_map, OopTaskQueue* work_queue);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-#endif // SHARE_GC_CMS_CMSOOPCLOSURES_HPP
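
Every closure above stamps in the same two members via DO_OOP_WORK_DEFN: virtual do_oop overloads plus a templated do_oop_work that the overloads funnel into (the matching DO_OOP_WORK_IMPL appears in the inline header that follows). A compilable standalone sketch of that declare/define macro pair, simplified to a single pointer type and a print in place of the RawAccess decode (mock types, illustration only):

    #include <cstdio>

    typedef int* oop;   // stand-in for HotSpot's oop

    // Declaration half: stamped into each closure class.
    #define DO_OOP_WORK_DEFN                            \
      virtual void do_oop(oop* p);                      \
      template <class T> void do_oop_work(T* p);

    // Definition half: stamped once per class, shares the null-check body.
    #define DO_OOP_WORK_IMPL(cls)                       \
      template <class T> void cls::do_oop_work(T* p) {  \
        if (*p != 0) {                                  \
          std::printf(#cls " visits %p\n", (void*)*p);  \
        }                                               \
      }                                                 \
      void cls::do_oop(oop* p) { do_oop_work(p); }

    struct MockMarkClosure {
      DO_OOP_WORK_DEFN
      virtual ~MockMarkClosure() {}
    };

    DO_OOP_WORK_IMPL(MockMarkClosure)
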
--- a/src/hotspot/share/gc/cms/cmsOopClosures.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
-#define SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
-
-#include "gc/cms/cmsOopClosures.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-
-// MetadataVisitingOopIterateClosure and MetadataVisitingOopsInGenClosure are duplicated,
-// until we get rid of OopsInGenClosure.
-
-inline void MetadataVisitingOopsInGenClosure::do_klass(Klass* k) {
-  ClassLoaderData* cld = k->class_loader_data();
-  MetadataVisitingOopsInGenClosure::do_cld(cld);
-}
-
-inline void MetadataVisitingOopsInGenClosure::do_cld(ClassLoaderData* cld) {
-  cld->oops_do(this, ClassLoaderData::_claim_strong);
-}
-
-// Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls)                                \
-  template <class T> void cls::do_oop_work(T* p) {           \
-    T heap_oop = RawAccess<>::oop_load(p);                   \
-    if (!CompressedOops::is_null(heap_oop)) {                \
-      oop obj = CompressedOops::decode_not_null(heap_oop);   \
-      do_oop(obj);                                           \
-    }                                                        \
-  }                                                          \
-  inline void cls::do_oop(oop* p)       { do_oop_work(p); }  \
-  inline void cls::do_oop(narrowOop* p) { do_oop_work(p); }
-
-DO_OOP_WORK_IMPL(MarkRefsIntoClosure)
-DO_OOP_WORK_IMPL(ParMarkRefsIntoClosure)
-DO_OOP_WORK_IMPL(MarkRefsIntoVerifyClosure)
-DO_OOP_WORK_IMPL(PushAndMarkClosure)
-DO_OOP_WORK_IMPL(ParPushAndMarkClosure)
-DO_OOP_WORK_IMPL(MarkRefsIntoAndScanClosure)
-DO_OOP_WORK_IMPL(ParMarkRefsIntoAndScanClosure)
-
-// Trim our work_queue so its length is below max at return
-inline void ParMarkRefsIntoAndScanClosure::trim_queue(uint max) {
-  while (_work_queue->size() > max) {
-    oop newOop;
-    if (_work_queue->pop_local(newOop)) {
-      assert(oopDesc::is_oop(newOop), "Expected an oop");
-      assert(_bit_map->isMarked((HeapWord*)newOop),
-             "only grey objects on this stack");
-      // iterate over the oops in this oop, marking and pushing
-      // the ones in CMS heap (i.e. in _span).
-      newOop->oop_iterate(&_parPushAndMarkClosure);
-    }
-  }
-}
-
-DO_OOP_WORK_IMPL(PushOrMarkClosure)
-DO_OOP_WORK_IMPL(ParPushOrMarkClosure)
-DO_OOP_WORK_IMPL(CMSKeepAliveClosure)
-DO_OOP_WORK_IMPL(CMSInnerParMarkAndPushClosure)
-DO_OOP_WORK_IMPL(CMSParKeepAliveClosure)
-
-#endif // SHARE_GC_CMS_CMSOOPCLOSURES_INLINE_HPP
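
trim_queue() above keeps the local mark queue under a watermark by popping entries from its own end and scanning them, pushing any newly discovered grey work back. A standalone sketch of that discipline, with std::deque standing in for HotSpot's lock-free OopTaskQueue and a mock task in place of a grey oop (illustration only):

    #include <deque>
    #include <cstddef>

    struct MockTask { int depth; };   // stands in for a grey oop

    // Scanning a task may discover more work, pushed to our own end.
    void scan(MockTask t, std::deque<MockTask>& queue) {
      if (t.depth > 0) {
        queue.push_back(MockTask{t.depth - 1});
      }
    }

    // Pop from the local end while the queue is over the watermark,
    // analogous to pop_local() + oop_iterate(&_parPushAndMarkClosure).
    void trim_queue(std::deque<MockTask>& queue, std::size_t max) {
      while (queue.size() > max) {
        MockTask t = queue.back();
        queue.pop_back();
        scan(t, queue);
      }
    }
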
--- a/src/hotspot/share/gc/cms/cmsVMOperations.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "memory/universe.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/os.hpp"
-#include "utilities/dtrace.hpp"
-
-//////////////////////////////////////////////////////////
-// Methods in abstract class VM_CMS_Operation
-//////////////////////////////////////////////////////////
-void VM_CMS_Operation::verify_before_gc() {
-  if (VerifyBeforeGC &&
-      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
-    HandleMark hm;
-    FreelistLocker x(_collector);
-    MutexLocker  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    CMSHeap::heap()->prepare_for_verify();
-    Universe::verify();
-  }
-}
-
-void VM_CMS_Operation::verify_after_gc() {
-  if (VerifyAfterGC &&
-      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
-    HandleMark hm;
-    FreelistLocker x(_collector);
-    MutexLocker  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
-    Universe::verify();
-  }
-}
-
-bool VM_CMS_Operation::lost_race() const {
-  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
-    // We lost a race to a foreground collection
-    // -- there's nothing to do
-    return true;
-  }
-  assert(CMSCollector::abstract_state() == legal_state(),
-         "Inconsistent collector state?");
-  return false;
-}
-
-bool VM_CMS_Operation::doit_prologue() {
-  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
-  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
-  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "Possible deadlock");
-
-  Heap_lock->lock();
-  if (lost_race()) {
-    assert(_prologue_succeeded == false, "Initialized in c'tor");
-    Heap_lock->unlock();
-  } else {
-    _prologue_succeeded = true;
-  }
-  return _prologue_succeeded;
-}
-
-void VM_CMS_Operation::doit_epilogue() {
-  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
-  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
-  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "Possible deadlock");
-
-  if (Universe::has_reference_pending_list()) {
-    Heap_lock->notify_all();
-  }
-  Heap_lock->unlock();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Initial_Mark
-//////////////////////////////////////////////////////////
-void VM_CMS_Initial_Mark::doit() {
-  if (lost_race()) {
-    // Nothing to do.
-    return;
-  }
-  HS_PRIVATE_CMS_INITMARK_BEGIN();
-  GCIdMark gc_id_mark(_gc_id);
-
-  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
-
-  CMSHeap* heap = CMSHeap::heap();
-  GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
-
-  VM_CMS_Operation::verify_before_gc();
-
-  IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
-
-  VM_CMS_Operation::verify_after_gc();
-
-  _collector->_gc_timer_cm->register_gc_pause_end();
-
-  HS_PRIVATE_CMS_INITMARK_END();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Final_Remark
-//////////////////////////////////////////////////////////
-void VM_CMS_Final_Remark::doit() {
-  if (lost_race()) {
-    // Nothing to do.
-    return;
-  }
-  HS_PRIVATE_CMS_REMARK_BEGIN();
-  GCIdMark gc_id_mark(_gc_id);
-
-  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
-
-  CMSHeap* heap = CMSHeap::heap();
-  GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
-
-  VM_CMS_Operation::verify_before_gc();
-
-  IsGCActiveMark x; // stop-world GC active
-  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
-
-  VM_CMS_Operation::verify_after_gc();
-
-  _collector->save_heap_summary();
-  _collector->_gc_timer_cm->register_gc_pause_end();
-
-  HS_PRIVATE_CMS_REMARK_END();
-}
-
-// VM operation to invoke a concurrent collection of a
-// GenCollectedHeap.
-void VM_GenCollectFullConcurrent::doit() {
-  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
-  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
-
-  CMSHeap* heap = CMSHeap::heap();
-  if (_gc_count_before == heap->total_collections()) {
-    // The "full" of do_full_collection call below "forces"
-    // a collection; the second arg, 0, below ensures that
-    // only the young gen is collected. XXX In the future,
-    // we'll probably need to have something in this interface
-    // to say do this only if we are sure we will not bail
-    // out to a full collection in this attempt, but that's
-    // for the future.
-    assert(SafepointSynchronize::is_at_safepoint(),
-      "We can only be executing this branch at a safepoint");
-    GCCauseSetter gccs(heap, _gc_cause);
-    heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
-  } // Else no need for a foreground young gc
-  assert((_gc_count_before < heap->total_collections()) ||
-         (GCLocker::is_active() /* gc may have been skipped */
-          && (_gc_count_before == heap->total_collections())),
-         "total_collections() should be monotonically increasing");
-
-  MutexLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-  assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
-  if (heap->total_full_collections() == _full_gc_count_before) {
-    // Nudge the CMS thread to start a concurrent collection.
-    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
-  } else {
-    assert(_full_gc_count_before < heap->total_full_collections(), "Error");
-    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
-  }
-}
-
-bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
-  Thread* thr = Thread::current();
-  assert(thr != NULL, "Unexpected tid");
-  if (!thr->is_Java_thread()) {
-    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
-    CMSHeap* heap = CMSHeap::heap();
-    if (_gc_count_before != heap->total_collections()) {
-      // No need to do a young gc, we'll just nudge the CMS thread
-      // in the doit() method above, to be executed soon.
-      assert(_gc_count_before < heap->total_collections(),
-             "total_collections() should be monotonically increasing");
-      return false;  // no need for foreground young gc
-    }
-  }
-  return true;       // may still need foreground young gc
-}
-
-
-void VM_GenCollectFullConcurrent::doit_epilogue() {
-  Thread* thr = Thread::current();
-  assert(thr->is_Java_thread(), "just checking");
-  JavaThread* jt = (JavaThread*)thr;
-
-  if (Universe::has_reference_pending_list()) {
-    Heap_lock->notify_all();
-  }
-  Heap_lock->unlock();
-
-  // It is fine to test whether the completed-collection count has
-  // exceeded our request count without locking because
-  // the completion count is monotonically increasing;
-  // this will break for very long-running apps when the
-  // count overflows and wraps around. XXX fix me !!!
-  // e.g. at the rate of 1 full gc per ms, the 32-bit count
-  // would wrap after roughly 50 days.
-  CMSHeap* heap = CMSHeap::heap();
-  if (_gc_cause != GCCause::_gc_locker &&
-      heap->total_full_collections_completed() <= _full_gc_count_before) {
-    // maybe we should change the condition to test _gc_cause ==
-    // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
-    // instead of _gc_cause != GCCause::_gc_locker
-    assert(GCCause::is_user_requested_gc(_gc_cause),
-           "the only way to get here is if this was a System.gc()-induced GC");
-    assert(ExplicitGCInvokesConcurrent, "Error");
-    // Now wait to witness the concurrent gc cycle completing, but
-    // do so in native mode, because we want to lock the
-    // FullGCCount_lock, which may be needed by the VM thread
-    // or by the CMS thread, so we do not want to be suspended
-    // while holding that lock.
-    ThreadToNativeFromVM native(jt);
-    MutexLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-    // Either a concurrent or a stop-world full gc is sufficient
-    // witness to our request.
-    while (heap->total_full_collections_completed() <= _full_gc_count_before) {
-      FullGCCount_lock->wait_without_safepoint_check();
-    }
-  }
-}
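
doit_epilogue() above relies on the completion count being monotonic: snapshot it before requesting the cycle, then wait on the monitor until the count moves past the snapshot. A standalone sketch of that wait, with std::mutex/std::condition_variable standing in for HotSpot's FullGCCount_lock monitor (mock names, illustration only):

    #include <condition_variable>
    #include <mutex>

    struct MockGcMonitor {
      std::mutex lock;                        // stands in for FullGCCount_lock
      std::condition_variable completed;
      unsigned int full_collections_completed;

      MockGcMonitor() : full_collections_completed(0) {}

      // Called when a full gc cycle finishes (concurrent or stop-world).
      void notify_cycle_complete() {
        std::lock_guard<std::mutex> g(lock);
        ++full_collections_completed;
        completed.notify_all();
      }

      // The epilogue's wait: block until the monotonic count passes the
      // snapshot taken before the request, re-checking under the lock.
      void wait_for_completion(unsigned int count_before) {
        std::unique_lock<std::mutex> g(lock);
        while (full_collections_completed <= count_before) {
          completed.wait(g);
        }
      }
    };
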
--- a/src/hotspot/share/gc/cms/cmsVMOperations.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMSVMOPERATIONS_HPP
-#define SHARE_GC_CMS_CMSVMOPERATIONS_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcVMOperations.hpp"
-#include "runtime/vmOperations.hpp"
-
-// The VM_CMS_Operation is slightly different from
-// a VM_GC_Operation -- it could not easily have been made a subclass
-// of VM_GC_Operation without several changes to VM_GC_Operation.
-// To minimize the changes, we have replicated some of the VM_GC_Operation
-// functionality here. We will consolidate that back by doing subclassing
-// as appropriate in Dolphin.
-//
-//  VM_Operation
-//    VM_CMS_Operation
-//    - implements the common portion of work done in support
-//      of CMS' stop-world phases (initial mark and remark).
-//
-//      VM_CMS_Initial_Mark
-//      VM_CMS_Final_Remark
-//
-
-// Forward decl.
-class CMSCollector;
-
-class VM_CMS_Operation: public VM_Operation {
- protected:
-  CMSCollector*  _collector;            // associated collector
-  bool           _prologue_succeeded;   // whether doit_prologue succeeded
-  uint           _gc_id;
-
-  bool lost_race() const;
-
- public:
-  VM_CMS_Operation(CMSCollector* collector):
-    _collector(collector),
-    _prologue_succeeded(false),
-    _gc_id(GCId::current()) {}
-  ~VM_CMS_Operation() {}
-
-  // The legal collector state for executing this CMS op.
-  virtual const CMSCollector::CollectorState legal_state() const = 0;
-
-  // Whether the pending list lock needs to be held
-  virtual const bool needs_pending_list_lock() const = 0;
-
-  // Execute operations in the context of the caller,
-  // prior to execution of the vm operation itself.
-  virtual bool doit_prologue();
-  // Execute operations in the context of the caller,
-  // following completion of the vm operation.
-  virtual void doit_epilogue();
-
-  virtual bool evaluate_at_safepoint() const { return true; }
-  virtual bool is_cheap_allocated() const { return false; }
-  virtual bool allow_nested_vm_operations() const  { return false; }
-  bool prologue_succeeded() const { return _prologue_succeeded; }
-
-  void verify_before_gc();
-  void verify_after_gc();
-};
-
-
-// VM_CMS_Operation for the initial marking phase of CMS.
-class VM_CMS_Initial_Mark: public VM_CMS_Operation {
- public:
-  VM_CMS_Initial_Mark(CMSCollector* _collector) :
-    VM_CMS_Operation(_collector) {}
-
-  virtual VMOp_Type type() const { return VMOp_CMS_Initial_Mark; }
-  virtual void doit();
-
-  virtual const CMSCollector::CollectorState legal_state() const {
-    return CMSCollector::InitialMarking;
-  }
-
-  virtual const bool needs_pending_list_lock() const {
-    return false;
-  }
-};
-
-// VM_CMS_Operation for the final remark phase of CMS.
-class VM_CMS_Final_Remark: public VM_CMS_Operation {
- public:
-  VM_CMS_Final_Remark(CMSCollector* _collector) :
-    VM_CMS_Operation(_collector) {}
-  virtual VMOp_Type type() const { return VMOp_CMS_Final_Remark; }
-  virtual void doit();
-
-  virtual const CMSCollector::CollectorState legal_state() const {
-    return CMSCollector::FinalMarking;
-  }
-
-  virtual const bool needs_pending_list_lock() const {
-    return true;
-  }
-};
-
-
-// VM operation to invoke a concurrent collection of the heap,
-// which is a GenCollectedHeap.
-class VM_GenCollectFullConcurrent: public VM_GC_Operation {
- public:
-  VM_GenCollectFullConcurrent(uint gc_count_before,
-                              uint full_gc_count_before,
-                              GCCause::Cause gc_cause)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
-  {
-    assert(FullGCCount_lock != NULL, "Error");
-  }
-  ~VM_GenCollectFullConcurrent() {}
-  virtual VMOp_Type type() const { return VMOp_GenCollectFullConcurrent; }
-  virtual void doit();
-  virtual void doit_epilogue();
-  virtual bool is_cheap_allocated() const { return false; }
-  virtual bool evaluate_at_safepoint() const;
-};
-
-#endif // SHARE_GC_CMS_CMSVMOPERATIONS_HPP
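
The prologue/epilogue split declared above carries a lock across the whole VM operation: doit_prologue() takes Heap_lock and keeps it only when the operation is still needed, and doit_epilogue() releases it afterwards; losing the race returns the lock immediately. A standalone sketch of that handoff, with std::mutex standing in for Heap_lock and a boolean for the lost-race check (mock types, illustration only):

    #include <mutex>

    struct MockCmsOp {
      std::mutex& heap_lock;       // stands in for Heap_lock
      bool prologue_succeeded;

      explicit MockCmsOp(std::mutex& hl)
        : heap_lock(hl), prologue_succeeded(false) {}

      bool doit_prologue(bool lost_race) {
        heap_lock.lock();
        if (lost_race) {
          heap_lock.unlock();          // a foreground collection got there first
        } else {
          prologue_succeeded = true;   // lock stays held across doit()
        }
        return prologue_succeeded;
      }

      void doit_epilogue() {
        heap_lock.unlock();            // released only after the safepoint work
      }
    };
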
--- a/src/hotspot/share/gc/cms/cms_globals.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,429 +0,0 @@
-/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CMS_GLOBALS_HPP
-#define SHARE_GC_CMS_CMS_GLOBALS_HPP
-
-#define GC_CMS_FLAGS(develop,                                               \
-                     develop_pd,                                            \
-                     product,                                               \
-                     product_pd,                                            \
-                     diagnostic,                                            \
-                     diagnostic_pd,                                         \
-                     experimental,                                          \
-                     notproduct,                                            \
-                     manageable,                                            \
-                     product_rw,                                            \
-                     lp64_product,                                          \
-                     range,                                                 \
-                     constraint,                                            \
-                     writeable)                                             \
-  product(bool, UseCMSBestFit, true,                                        \
-          "Use CMS best fit allocation strategy")                           \
-                                                                            \
-  product(size_t, CMSOldPLABMax, 1024,                                      \
-          "Maximum size of CMS gen promotion LAB caches per worker "        \
-          "per block size")                                                 \
-          range(1, max_uintx)                                               \
-          constraint(CMSOldPLABMaxConstraintFunc,AfterMemoryInit)           \
-                                                                            \
-  product(size_t, CMSOldPLABMin, 16,                                        \
-          "Minimum size of CMS gen promotion LAB caches per worker "        \
-          "per block size")                                                 \
-          range(1, max_uintx)                                               \
-          constraint(CMSOldPLABMinConstraintFunc,AfterMemoryInit)           \
-                                                                            \
-  product(uintx, CMSOldPLABNumRefills, 4,                                   \
-          "Nominal number of refills of CMS gen promotion LAB cache "       \
-          "per worker per block size")                                      \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(bool, CMSOldPLABResizeQuicker, false,                             \
-          "React on-the-fly during a scavenge to a sudden "                 \
-          "change in block demand rate")                                    \
-                                                                            \
-  product(uintx, CMSOldPLABToleranceFactor, 4,                              \
-          "The tolerance of the phase-change detector for on-the-fly "      \
-          "PLAB resizing during a scavenge")                                \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSOldPLABReactivityFactor, 2,                             \
-          "The gain in the feedback loop for on-the-fly PLAB resizing "     \
-          "during a scavenge")                                              \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product_pd(size_t, CMSYoungGenPerWorker,                                  \
-          "The maximum size of young gen chosen by default per GC worker "  \
-          "thread available")                                               \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSIncrementalSafetyFactor, 10,                            \
-          "Percentage (0-100) used to add conservatism when computing the " \
-          "duty cycle")                                                     \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMSExpAvgFactor, 50,                                       \
-          "Percentage (0-100) used to weight the current sample when "      \
-          "computing exponential averages for CMS statistics")              \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMS_FLSWeight, 75,                                         \
-          "Percentage (0-100) used to weight the current sample when "      \
-          "computing exponentially decaying averages for CMS FLS "          \
-          "statistics")                                                     \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMS_FLSPadding, 1,                                         \
-          "The multiple of deviation from mean to use for buffering "       \
-          "against volatility in free list demand")                         \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, FLSCoalescePolicy, 2,                                      \
-          "CMS: aggressiveness level for coalescing, increasing "           \
-          "from 0 to 4")                                                    \
-          range(0, 4)                                                       \
-                                                                            \
-  product(bool, FLSAlwaysCoalesceLarge, false,                              \
-          "CMS: larger free blocks are always available for coalescing")    \
-                                                                            \
-  product(double, FLSLargestBlockCoalesceProximity, 0.99,                   \
-          "CMS: the smaller the percentage the greater the coalescing "     \
-          "force")                                                          \
-          range(0.0, 1.0)                                                   \
-                                                                            \
-  product(double, CMSSmallCoalSurplusPercent, 1.05,                         \
-          "CMS: the factor by which to inflate estimated demand of small "  \
-          "block sizes to prevent coalescing with an adjoining block")      \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(double, CMSLargeCoalSurplusPercent, 0.95,                         \
-          "CMS: the factor by which to inflate estimated demand of large "  \
-          "block sizes to prevent coalescing with an adjoining block")      \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(double, CMSSmallSplitSurplusPercent, 1.10,                        \
-          "CMS: the factor by which to inflate estimated demand of small "  \
-          "block sizes to prevent splitting to supply demand for smaller "  \
-          "blocks")                                                         \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(double, CMSLargeSplitSurplusPercent, 1.00,                        \
-          "CMS: the factor by which to inflate estimated demand of large "  \
-          "block sizes to prevent splitting to supply demand for smaller "  \
-          "blocks")                                                         \
-          range(0.0, DBL_MAX)                                               \
-                                                                            \
-  product(bool, CMSExtrapolateSweep, false,                                 \
-          "CMS: cushion for block demand during sweep")                     \
-                                                                            \
-  product(uintx, CMS_SweepWeight, 75,                                       \
-          "Percentage (0-100) used to weight the current sample when "      \
-          "computing exponentially decaying average for inter-sweep "       \
-          "duration")                                                       \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMS_SweepPadding, 1,                                       \
-          "The multiple of deviation from mean to use for buffering "       \
-          "against volatility in inter-sweep duration")                     \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
-          "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
-          "duration exceeds this threshold in milliseconds")                \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(bool, CMSClassUnloadingEnabled, true,                             \
-          "Whether class unloading enabled when using CMS GC")              \
-                                                                            \
-  product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
-          "When CMS class unloading is enabled, the maximum CMS cycle "     \
-          "count for which classes may not be unloaded")                    \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSIndexedFreeListReplenish, 4,                            \
-          "Replenish an indexed free list with this number of chunks")      \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(bool, CMSReplenishIntermediate, true,                             \
-          "Replenish all intermediate free-list caches")                    \
-                                                                            \
-  product(bool, CMSSplitIndexedFreeListBlocks, true,                        \
-          "When satisfying batched demand, split blocks from the "          \
-          "IndexedFreeList whose size is a multiple of requested size")     \
-                                                                            \
-  product(bool, CMSLoopWarn, false,                                         \
-          "Warn in case of excessive CMS looping")                          \
-                                                                            \
-  notproduct(bool, CMSMarkStackOverflowALot, false,                         \
-          "Simulate frequent marking stack / work queue overflow")          \
-                                                                            \
-  notproduct(uintx, CMSMarkStackOverflowInterval, 1000,                     \
-          "An \"interval\" counter that determines how frequently "         \
-          "to simulate overflow; a smaller number increases frequency")     \
-                                                                            \
-  product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
-          "Maximum number of abortable preclean iterations, if > 0")        \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
-          "Maximum time in abortable preclean (in milliseconds)")           \
-          range(0, max_intx)                                                \
-                                                                            \
-  product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
-          "Nominal minimum work per abortable preclean iteration")          \
-          range(0, max_uintx)                                               \
-                                                                            \
-  manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
-          "Time that we sleep between iterations when not given "           \
-          "enough work per iteration")                                      \
-          range(0, max_intx)                                                \
-                                                                            \
-  /* 4096 = CardTable::card_size_in_words * BitsPerWord */                  \
-  product(size_t, CMSRescanMultiple, 32,                                    \
-          "Size (in cards) of CMS parallel rescan task")                    \
-          range(1, SIZE_MAX / 4096)                                         \
-          constraint(CMSRescanMultipleConstraintFunc,AfterMemoryInit)       \
-                                                                            \
-  /* 4096 = CardTable::card_size_in_words * BitsPerWord */                  \
-  product(size_t, CMSConcMarkMultiple, 32,                                  \
-          "Size (in cards) of CMS concurrent MT marking task")              \
-          range(1, SIZE_MAX / 4096)                                         \
-          constraint(CMSConcMarkMultipleConstraintFunc,AfterMemoryInit)     \
-                                                                            \
-  product(bool, CMSAbortSemantics, false,                                   \
-          "Whether abort-on-overflow semantics is implemented")             \
-                                                                            \
-  product(bool, CMSParallelInitialMarkEnabled, true,                        \
-          "Use the parallel initial mark.")                                 \
-                                                                            \
-  product(bool, CMSParallelRemarkEnabled, true,                             \
-          "Whether parallel remark enabled (only if ParNewGC)")             \
-                                                                            \
-  product(bool, CMSParallelSurvivorRemarkEnabled, true,                     \
-          "Whether parallel remark of survivor space "                      \
-          "enabled (effective only if CMSParallelRemarkEnabled)")           \
-                                                                            \
-  product(bool, CMSPLABRecordAlways, true,                                  \
-          "Always record survivor space PLAB boundaries (effective only "   \
-          "if CMSParallelSurvivorRemarkEnabled)")                           \
-                                                                            \
-  product(bool, CMSEdenChunksRecordAlways, true,                            \
-          "Always record eden chunks used for the parallel initial mark "   \
-          "or remark of eden")                                              \
-                                                                            \
-  product(bool, CMSConcurrentMTEnabled, true,                               \
-          "Whether multi-threaded concurrent work enabled "                 \
-          "(effective only if ParNewGC)")                                   \
-                                                                            \
-  product(bool, CMSPrecleaningEnabled, true,                                \
-          "Whether concurrent precleaning enabled")                         \
-                                                                            \
-  product(uintx, CMSPrecleanIter, 3,                                        \
-          "Maximum number of precleaning iteration passes")                 \
-          range(0, 9)                                                       \
-                                                                            \
-  product(uintx, CMSPrecleanDenominator, 3,                                 \
-          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
-          "ratio")                                                          \
-          range(1, max_uintx)                                               \
-          constraint(CMSPrecleanDenominatorConstraintFunc,AfterErgo)        \
-                                                                            \
-  product(uintx, CMSPrecleanNumerator, 2,                                   \
-          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
-          "ratio")                                                          \
-          range(0, max_uintx-1)                                             \
-          constraint(CMSPrecleanNumeratorConstraintFunc,AfterErgo)          \
-                                                                            \
-  product(bool, CMSPrecleanRefLists1, true,                                 \
-          "Preclean ref lists during (initial) preclean phase")             \
-                                                                            \
-  product(bool, CMSPrecleanRefLists2, false,                                \
-          "Preclean ref lists during abortable preclean phase")             \
-                                                                            \
-  product(bool, CMSPrecleanSurvivors1, false,                               \
-          "Preclean survivors during (initial) preclean phase")             \
-                                                                            \
-  product(bool, CMSPrecleanSurvivors2, true,                                \
-          "Preclean survivors during abortable preclean phase")             \
-                                                                            \
-  product(uintx, CMSPrecleanThreshold, 1000,                                \
-          "Do not iterate again if number of dirty cards is less than this")\
-          range(100, max_uintx)                                             \
-                                                                            \
-  product(bool, CMSCleanOnEnter, true,                                      \
-          "Clean-on-enter optimization for reducing number of dirty cards") \
-                                                                            \
-  product(uintx, CMSRemarkVerifyVariant, 1,                                 \
-          "Choose variant (1,2) of verification following remark")          \
-          range(1, 2)                                                       \
-                                                                            \
-  product(size_t, CMSScheduleRemarkEdenSizeThreshold, 2*M,                  \
-          "If Eden size is below this, do not try to schedule remark")      \
-          range(0, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSScheduleRemarkEdenPenetration, 50,                      \
-          "The Eden occupancy percentage (0-100) at which "                 \
-          "to try and schedule remark pause")                               \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMSScheduleRemarkSamplingRatio, 5,                         \
-          "Start sampling eden top at least before young gen "              \
-          "occupancy reaches 1/<ratio> of the size at which "               \
-          "we plan to schedule remark")                                     \
-          range(1, max_uintx)                                               \
-                                                                            \
-  product(uintx, CMSSamplingGrain, 16*K,                                    \
-          "The minimum distance between eden samples for CMS (see above)")  \
-          range(ObjectAlignmentInBytes, max_uintx)                          \
-          constraint(CMSSamplingGrainConstraintFunc,AfterMemoryInit)        \
-                                                                            \
-  product(bool, CMSScavengeBeforeRemark, false,                             \
-          "Attempt scavenge before the CMS remark step")                    \
-                                                                            \
-  product(uintx, CMSWorkQueueDrainThreshold, 10,                            \
-          "Don't drain below this size per parallel worker/thief")          \
-          range(1, max_juint)                                               \
-          constraint(CMSWorkQueueDrainThresholdConstraintFunc,AfterErgo)    \
-                                                                            \
-  manageable(intx, CMSWaitDuration, 2000,                                   \
-          "Time in milliseconds that CMS thread waits for young GC")        \
-          range(min_jint, max_jint)                                         \
-                                                                            \
-  develop(uintx, CMSCheckInterval, 1000,                                    \
-          "Interval in milliseconds that CMS thread checks if it "          \
-          "should start a collection cycle")                                \
-                                                                            \
-  product(bool, CMSYield, true,                                             \
-          "Yield between steps of CMS")                                     \
-                                                                            \
-  product(size_t, CMSBitMapYieldQuantum, 10*M,                              \
-          "Bitmap operations should process at most this many bits "        \
-          "between yields")                                                 \
-          range(1, max_uintx)                                               \
-          constraint(CMSBitMapYieldQuantumConstraintFunc,AfterMemoryInit)   \
-                                                                            \
-  product(bool, CMSPrintChunksInDump, false,                                \
-          "If logging for the \"gc\" and \"promotion\" tags is enabled on"  \
-          "trace level include more detailed information about the"         \
-          "free chunks")                                                    \
-                                                                            \
-  product(bool, CMSPrintObjectsInDump, false,                               \
-          "If logging for the \"gc\" and \"promotion\" tags is enabled on"  \
-          "trace level include more detailed information about the"         \
-          "allocated objects")                                              \
-                                                                            \
-  diagnostic(bool, FLSVerifyAllHeapReferences, false,                       \
-          "Verify that all references across the FLS boundary "             \
-          "are to valid objects")                                           \
-                                                                            \
-  diagnostic(bool, FLSVerifyLists, false,                                   \
-          "Do lots of (expensive) FreeListSpace verification")              \
-                                                                            \
-  diagnostic(bool, FLSVerifyIndexTable, false,                              \
-          "Do lots of (expensive) FLS index table verification")            \
-                                                                            \
-  product(uintx, CMSTriggerRatio, 80,                                       \
-          "Percentage of MinHeapFreeRatio in CMS generation that is "       \
-          "allocated before a CMS collection cycle commences")              \
-          range(0, 100)                                                     \
-                                                                            \
-  product(uintx, CMSBootstrapOccupancy, 50,                                 \
-          "Percentage CMS generation occupancy at which to "                \
-          "initiate CMS collection for bootstrapping collection stats")     \
-          range(0, 100)                                                     \
-                                                                            \
-  product(intx, CMSInitiatingOccupancyFraction, -1,                         \
-          "Percentage CMS generation occupancy to start a CMS collection "  \
-          "cycle. A negative value means that CMSTriggerRatio is used")     \
-          range(min_intx, 100)                                              \
-                                                                            \
-  manageable(intx, CMSTriggerInterval, -1,                                  \
-          "Commence a CMS collection cycle (at least) every so many "       \
-          "milliseconds (0 permanently, -1 disabled)")                      \
-          range(-1, max_intx)                                               \
-                                                                            \
-  product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
-          "Only use occupancy as a criterion for starting a CMS collection")\
-                                                                            \
-  product(uintx, CMSIsTooFullPercentage, 98,                                \
-          "An absolute ceiling above which CMS will always consider the "   \
-          "unloading of classes when class unloading is enabled")           \
-          range(0, 100)                                                     \
-                                                                            \
-  develop(bool, CMSTestInFreeList, false,                                   \
-          "Check if the coalesced range is already in the "                 \
-          "free lists as claimed")                                          \
-                                                                            \
-  notproduct(bool, CMSVerifyReturnedBytes, false,                           \
-          "Check that all the garbage collected was returned to the "       \
-          "free lists")                                                     \
-                                                                            \
-  diagnostic(bool, BindCMSThreadToCPU, false,                               \
-          "Bind CMS Thread to CPU if possible")                             \
-                                                                            \
-  diagnostic(uintx, CPUForCMSThread, 0,                                     \
-          "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, CMSCoordinatorYieldSleepCount, 10,                         \
-          "Number of times the coordinator GC thread will sleep while "     \
-          "yielding before giving up and resuming GC")                      \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(uintx, CMSYieldSleepCount, 0,                                     \
-          "Number of times a GC thread (minus the coordinator) "            \
-          "will sleep while yielding before giving up and resuming GC")     \
-          range(0, max_juint)                                               \
-                                                                            \
-  product(bool, ParGCUseLocalOverflow, false,                               \
-          "Instead of a global overflow list, use local overflow stacks")   \
-                                                                            \
-  product(bool, ParGCTrimOverflow, true,                                    \
-          "Eagerly trim the local overflow lists "                          \
-          "(when ParGCUseLocalOverflow)")                                   \
-                                                                            \
-  notproduct(bool, ParGCWorkQueueOverflowALot, false,                       \
-          "Simulate work queue overflow in ParNew")                         \
-                                                                            \
-  notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000,                   \
-          "An `interval' counter that determines how frequently "           \
-          "we simulate overflow; a smaller number increases frequency")     \
-                                                                            \
-  product(uintx, ParGCDesiredObjsFromOverflowList, 20,                      \
-          "The desired number of objects to claim from the overflow list")  \
-          range(0, max_uintx)                                               \
-                                                                            \
-  diagnostic(uintx, ParGCStridesPerThread, 2,                               \
-          "The number of strides per worker thread that we divide up the "  \
-          "card table scanning work into")                                  \
-          range(1, max_uintx)                                               \
-          constraint(ParGCStridesPerThreadConstraintFunc,AfterErgo)         \
-                                                                            \
-  diagnostic(intx, ParGCCardsPerStrideChunk, 256,                           \
-          "The number of cards in each chunk of the parallel chunks used "  \
-          "during card table scanning")                                     \
-          range(1, max_intx)                                                \
-          constraint(ParGCCardsPerStrideChunkConstraintFunc,AfterMemoryInit)
-
-#endif // SHARE_GC_CMS_CMS_GLOBALS_HPP
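For context on the trigger flags deleted above: a non-negative CMSInitiatingOccupancyFraction is used directly, otherwise the initiating occupancy is derived from CMSTriggerRatio and MinHeapFreeRatio, as the descriptions of those flags state. A minimal, self-contained sketch of that arithmetic (the helper function is a hypothetical stand-in for illustration, not the removed HotSpot code):

#include <cstdio>

// Sketch of the trigger arithmetic described by the flag comments above.
// io = CMSInitiatingOccupancyFraction, tr = CMSTriggerRatio.
static double initiating_occupancy(long io, unsigned long tr,
                                   unsigned long min_heap_free_ratio) {
  if (io >= 0) {
    return io / 100.0;                 // explicit fraction wins
  }
  // Negative io: a cycle starts once CMSTriggerRatio percent of
  // MinHeapFreeRatio has been allocated on top of the non-free part.
  return ((100 - min_heap_free_ratio) +
          (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
}

int main() {
  // Defaults above (io = -1, tr = 80) with MinHeapFreeRatio = 40:
  // (60 + 32) / 100 = 0.92, i.e. a cycle starts at ~92% occupancy.
  std::printf("%.2f\n", initiating_occupancy(-1, 80, 40));
  // Command-line equivalent of pinning the trigger instead:
  //   -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly
  return 0;
}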
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3141 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/blockOffsetTable.inline.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/binaryTreeDictionary.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/globals.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/init.hpp"
-#include "runtime/java.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/align.hpp"
-#include "utilities/copy.hpp"
-
-// Specialize for AdaptiveFreeList, which tries to avoid
-// splitting a chunk of a size that is under-populated in favor of
-// an over-populated size.  The general get_better_list() just returns
-// the current list.
-template <>
-TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
-TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
-  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
-  // A candidate chunk has been found.  If it is already
-  // under-populated, get a chunk associated with the hint for this
-  // chunk.
-
-  TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
-  if (curTL->surplus() <= 0) {
-    /* Use the hint to find a size with a surplus, and reset the hint. */
-    TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
-    while (hintTL->hint() != 0) {
-      assert(hintTL->hint() > hintTL->size(),
-        "hint points in the wrong direction");
-      hintTL = dictionary->find_list(hintTL->hint());
-      assert(curTL != hintTL, "Infinite loop");
-      if (hintTL == NULL ||
-          hintTL == curTL /* Should not happen but protect against it */ ) {
-        // No useful hint.  Set the hint to NULL and go on.
-        curTL->set_hint(0);
-        break;
-      }
-      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
-      if (hintTL->surplus() > 0) {
-        // The hint led to a list that has a surplus.  Use it.
-        // Set the hint for the candidate to an overpopulated
-        // size.
-        curTL->set_hint(hintTL->size());
-        // Change the candidate.
-        curTL = hintTL;
-        break;
-      }
-    }
-  }
-  return curTL;
-}
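To make the hint chase above concrete, a hypothetical walk through the dictionary (sizes, surpluses and hints invented for illustration):

// list[24]: surplus = -3, hint = 32   <- candidate; under-populated
// list[32]: surplus =  0, hint = 48   <- no surplus; follow its hint
// list[48]: surplus = +5, hint = 0    <- surplus found; becomes the result
// get_better_list() also records hint = 48 on list[24], so the next request
// for a 24-word chunk goes straight to the over-populated 48-word list.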
-
-void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
-  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
-  if (nd) {
-    if (split) {
-      if (birth) {
-        nd->increment_split_births();
-        nd->increment_surplus();
-      }  else {
-        nd->increment_split_deaths();
-        nd->decrement_surplus();
-      }
-    } else {
-      if (birth) {
-        nd->increment_coal_births();
-        nd->increment_surplus();
-      } else {
-        nd->increment_coal_deaths();
-        nd->decrement_surplus();
-      }
-    }
-  }
-  // A list for this size may not be found (nd == NULL) if:
-  //  - this is a death where the appropriate list is now
-  //    empty and has been removed from the tree, or
-  //  - this is a birth associated with a LinAB; the chunk
-  //    for the LinAB is not in the dictionary.
-}
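For reference, the four census events distinguished above correspond to these chunk-lifecycle transitions (a summary of the code, not new behavior):

// split birth : a chunk of this size was produced by splitting a larger one
// split death : a chunk of this size was split up to satisfy another size
// coal birth  : a chunk of this size was formed by coalescing neighbors
// coal death  : a chunk of this size was absorbed into a larger coalesced one
// Births raise the list's surplus, deaths lower it; the surplus in turn
// drives get_better_list() above and the sweep-census closures below.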
-
-bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
-  if (FLSAlwaysCoalesceLarge) return true;
-
-  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
-  // None of requested size implies overpopulated.
-  return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
-         list_of_size->count() > list_of_size->coal_desired();
-}
-
-// For each list in the tree, calculate the desired, desired
-// coalesce, count before sweep, and surplus before sweep.
-class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  double _percentage;
-  float _inter_sweep_current;
-  float _inter_sweep_estimate;
-  float _intra_sweep_estimate;
-
- public:
-  BeginSweepClosure(double p, float inter_sweep_current,
-                              float inter_sweep_estimate,
-                              float intra_sweep_estimate) :
-   _percentage(p),
-   _inter_sweep_current(inter_sweep_current),
-   _inter_sweep_estimate(inter_sweep_estimate),
-   _intra_sweep_estimate(intra_sweep_estimate) { }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    double coalSurplusPercent = _percentage;
-    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
-    fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
-    fl->set_before_sweep(fl->count());
-    fl->set_bfr_surp(fl->surplus());
-  }
-};
-
-void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
-  float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
-  BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
-                        inter_sweep_estimate,
-                        intra_sweep_estimate);
-  bsc.do_tree(root());
-}
-
-// Calculate surpluses for the lists in the tree.
-class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  double percentage;
- public:
-  setTreeSurplusClosure(double v) { percentage = v; }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    double splitSurplusPercent = percentage;
-    fl->set_surplus(fl->count() -
-                   (ssize_t)((double)fl->desired() * splitSurplusPercent));
-  }
-};
-
-void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
-  setTreeSurplusClosure sts(splitSurplusPercent);
-  sts.do_tree(root());
-}
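A worked example of the surplus computation above, with invented numbers:

// count = 100, desired = 80, splitSurplusPercent = 1.10:
//   surplus = 100 - (ssize_t)(80 * 1.10) = 100 - 88 = 12
// i.e. twelve chunks of this size may be split off for other sizes before
// the list dips below its padded demand estimate.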
-
-// Set hints for the lists in the tree.
-class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  size_t hint;
- public:
-  setTreeHintsClosure(size_t v) { hint = v; }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    fl->set_hint(hint);
-    assert(fl->hint() == 0 || fl->hint() > fl->size(),
-      "Current hint is inconsistent");
-    if (fl->surplus() > 0) {
-      hint = fl->size();
-    }
-  }
-};
-
-void AFLBinaryTreeDictionary::set_tree_hints(void) {
-  setTreeHintsClosure sth(0);
-  sth.do_tree(root());
-}
-
-// Save count before previous sweep and splits and coalesces.
-class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    fl->set_prev_sweep(fl->count());
-    fl->set_coal_births(0);
-    fl->set_coal_deaths(0);
-    fl->set_split_births(0);
-    fl->set_split_deaths(0);
-  }
-};
-
-void AFLBinaryTreeDictionary::clear_tree_census(void) {
-  clearTreeCensusClosure ctc;
-  ctc.do_tree(root());
-}
-
-// Do reporting and post sweep clean up.
-void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
-  // Does walking the tree 3 times hurt?
-  set_tree_surplus(splitSurplusPercent);
-  set_tree_hints();
-  LogTarget(Trace, gc, freelist, stats) log;
-  if (log.is_enabled()) {
-    LogStream out(log);
-    report_statistics(&out);
-  }
-  clear_tree_census();
-}
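Putting the closures together, the census calls above bracket each sweep in a fixed order (a summary of the control flow in this file):

// begin_sweep_dict_census() -> per list: compute_desired(), set coal_desired,
//                              snapshot count into before_sweep and the
//                              surplus into bfr_surp
// ... the sweep runs; dict_census_update() records births and deaths ...
// end_sweep_dict_census()   -> set_tree_surplus(), set_tree_hints(),
//                              optional trace report, clear_tree_census()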
-
-// Print census information - counts, births, deaths, etc.
-// for each list in the tree.  Also print some summary
-// information.
-class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
-  int _print_line;
-  size_t _total_free;
-  AdaptiveFreeList<FreeChunk> _total;
-
- public:
-  PrintTreeCensusClosure() {
-    _print_line = 0;
-    _total_free = 0;
-  }
-  AdaptiveFreeList<FreeChunk>* total() { return &_total; }
-  size_t total_free() { return _total_free; }
-
-  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
-    LogStreamHandle(Debug, gc, freelist, census) out;
-
-    if (++_print_line >= 40) {
-      AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
-      _print_line = 0;
-    }
-    fl->print_on(&out);
-    // Accumulate this list's census values into the running total.
-    _total_free +=            fl->count()             * fl->size();
-    total()->set_count(       total()->count()        + fl->count()       );
-    total()->set_bfr_surp(    total()->bfr_surp()     + fl->bfr_surp()    );
-    total()->set_surplus(     total()->surplus()      + fl->surplus()     );
-    total()->set_desired(     total()->desired()      + fl->desired()     );
-    total()->set_prev_sweep(  total()->prev_sweep()   + fl->prev_sweep()  );
-    total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
-    total()->set_coal_births( total()->coal_births()  + fl->coal_births() );
-    total()->set_coal_deaths( total()->coal_deaths()  + fl->coal_deaths() );
-    total()->set_split_births(total()->split_births() + fl->split_births());
-    total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
-  }
-};
-
-void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {
-
-  st->print_cr("BinaryTree");
-  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
-  PrintTreeCensusClosure ptc;
-  ptc.do_tree(root());
-
-  AdaptiveFreeList<FreeChunk>* total = ptc.total();
-  AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
-  total->print_on(st, "TOTAL\t");
-  st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
-               ptc.total_free(),
-               (double)(total->split_births() + total->coal_births()
-                      - total->split_deaths() - total->coal_deaths())
-               /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
-              (double)(total->desired() - total->count())
-              /(total->desired() != 0 ? (double)total->desired() : 1.0));
-}
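The growth and deficit ratios printed above are easier to read with numbers plugged in (all values invented):

// split_births = 40, coal_births = 10, split_deaths = 20, coal_deaths = 5,
// prev_sweep = 50:   growth  = (40 + 10 - 20 - 5) / 50  = 0.50
// desired = 200, count = 150:  deficit = (200 - 150) / 200 = 0.25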
-
-/////////////////////////////////////////////////////////////////////////
-//// CompactibleFreeListSpace
-/////////////////////////////////////////////////////////////////////////
-
-// Highest rank taken by the free list locks.
-int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
-
-// Defaults are 0 so things will break badly if incorrectly initialized.
-size_t CompactibleFreeListSpace::IndexSetStart  = 0;
-size_t CompactibleFreeListSpace::IndexSetStride = 0;
-size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;
-
-size_t MinChunkSize = 0;
-
-void CompactibleFreeListSpace::set_cms_values() {
-  // Set CMS global values
-  assert(MinChunkSize == 0, "already set");
-
-  // MinChunkSize should be a multiple of MinObjAlignment and be large enough
-  // for chunks to contain a FreeChunk.
-  _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
-  MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;
-
-  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
-  IndexSetStart  = MinChunkSize;
-  IndexSetStride = MinObjAlignment;
-}
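As an illustration of the arithmetic in set_cms_values(): assuming sizeof(FreeChunk) is 24 bytes and MinObjAlignmentInBytes is 8 (plausible 64-bit values, not taken from this change), it follows that:

// _min_chunk_size_in_bytes = align_up(24, 8) = 24
// MinChunkSize             = 24 / 8          = 3 heap words
// IndexSetStart            = MinChunkSize    = 3
// IndexSetStride           = MinObjAlignment = 1 word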
-
-// Constructor
-CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
-  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
-                    CMSRescanMultiple),
-  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
-                    CMSConcMarkMultiple),
-  _bt(bs, mr),
-  _collector(NULL),
-  // free list locks are in the range of values taken by _lockRank
-  // This range currently is [_leaf+2, _leaf+3]
-  // Note: this requires that CFLspace c'tors
-  // are called serially in the order in which the locks
-  // are acquired in the program text. This is true today.
-  _freelistLock(_lockRank--, "CompactibleFreeListSpace_lock", true,
-                Monitor::_safepoint_check_never),
-  _preconsumptionDirtyCardClosure(NULL),
-  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
-                          "CompactibleFreeListSpace_dict_par_lock", true,
-                          Monitor::_safepoint_check_never)
-{
-  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
-         "FreeChunk is larger than expected");
-  _bt.set_space(this);
-  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
-
-  _dictionary = new AFLBinaryTreeDictionary(mr);
-
-  assert(_dictionary != NULL, "CMS dictionary initialization");
-  // The indexed free lists are initially all empty and are lazily
-  // filled in on demand. Initialize the array elements to NULL.
-  initializeIndexedFreeListArray();
-
-  _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
-                             SmallForLinearAlloc);
-
-  // CMSIndexedFreeListReplenish should be at least 1
-  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
-  _promoInfo.setSpace(this);
-  if (UseCMSBestFit) {
-    _fitStrategy = FreeBlockBestFitFirst;
-  } else {
-    _fitStrategy = FreeBlockStrategyNone;
-  }
-  check_free_list_consistency();
-
-  // Initialize locks for parallel case.
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
-                                            "a freelist par lock", true, Mutex::_safepoint_check_never);
-    DEBUG_ONLY(
-      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
-    )
-  }
-  _dictionary->set_par_lock(&_parDictionaryAllocLock);
-
-  _used_stable = 0;
-}
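A note on the rank bookkeeping in the constructor above: _lockRank starts at Mutex::leaf + 3 (see its definition earlier in this file) and is post-decremented per construction, which is why the comment insists the constructors run serially in lock-acquisition order:

// 1st CompactibleFreeListSpace constructed: _freelistLock rank = leaf + 3
// 2nd CompactibleFreeListSpace constructed: _freelistLock rank = leaf + 2
// Hence the documented range [_leaf+2, _leaf+3].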
-
-// Like CompactibleSpace forward() but always calls cross_threshold() to
-// update the block offset table.  Removed initialize_threshold call because
-// CFLS does not use a block offset array for contiguous spaces.
-HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
-                                    CompactPoint* cp, HeapWord* compact_top) {
-  // q is alive
-  // First check if we should switch compaction space
-  assert(this == cp->space, "'this' should be current compaction space.");
-  size_t compaction_max_size = pointer_delta(end(), compact_top);
-  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
-    "virtual adjustObjectSize_v() method is not correct");
-  size_t adjusted_size = adjustObjectSize(size);
-  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
-         "no small fragments allowed");
-  assert(minimum_free_block_size() == MinChunkSize,
-         "for de-virtualized reference below");
-  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
-  if (adjusted_size + MinChunkSize > compaction_max_size &&
-      adjusted_size != compaction_max_size) {
-    do {
-      // switch to next compaction space
-      cp->space->set_compaction_top(compact_top);
-      cp->space = cp->space->next_compaction_space();
-      if (cp->space == NULL) {
-        cp->gen = CMSHeap::heap()->young_gen();
-        assert(cp->gen != NULL, "compaction must succeed");
-        cp->space = cp->gen->first_compaction_space();
-        assert(cp->space != NULL, "generation must have a first compaction space");
-      }
-      compact_top = cp->space->bottom();
-      cp->space->set_compaction_top(compact_top);
-      // The correct adjusted_size may not be the same as that for this method
-      // (i.e., cp->space may no longer be "this" so adjust the size again.
-      // Use the virtual method which is not used above to save the virtual
-      // dispatch.
-      adjusted_size = cp->space->adjust_object_size_v(size);
-      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
-      assert(cp->space->minimum_free_block_size() == 0, "just checking");
-    } while (adjusted_size > compaction_max_size);
-  }
-
-  // store the forwarding pointer into the mark word
-  if ((HeapWord*)q != compact_top) {
-    q->forward_to(oop(compact_top));
-    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
-  } else {
-    // if the object isn't moving we can just set the mark to the default
-    // mark and handle it specially later on.
-    q->init_mark_raw();
-    assert(q->forwardee() == NULL, "should be forwarded to NULL");
-  }
-
-  compact_top += adjusted_size;
-
-  // we need to update the offset table so that the beginnings of objects can be
-  // found during scavenge.  Note that we are updating the offset table based on
-  // where the object will be once the compaction phase finishes.
-
-  // Always call cross_threshold().  A contiguous space can only call it when
-  // the compaction_top exceeds the current threshold but not for an
-  // non-contiguous space.
-  cp->threshold =
-    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
-  return compact_top;
-}
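The two forwarding outcomes in forward() above, in miniature (addresses invented):

// q = 0x1000, compact_top = 0x0800 -> q->forward_to(oop(0x0800)): the mark
//                                     word now encodes the destination.
// q = 0x1000, compact_top = 0x1000 -> the object stays put: init_mark_raw()
//                                     restores a clean header and the
//                                     compaction phase handles it specially.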
-
-// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
-// and use of single_block instead of alloc_block.  The name here is not really
-// appropriate - maybe a more general name could be invented for both the
-// contiguous and noncontiguous spaces.
-
-HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
-  _bt.single_block(start, the_end);
-  return end();
-}
-
-// Initialize the indexed free lists: every list starts empty (head and tail NULL).
-void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
-  for (size_t i = 0; i < IndexSetSize; i++) {
-    // Note that on platforms where objects are double word aligned,
-    // the odd array elements are not used.  It is convenient, however,
-    // to map directly from the object size to the array element.
-    _indexedFreeList[i].reset(IndexSetSize);
-    _indexedFreeList[i].set_size(i);
-    assert(_indexedFreeList[i].count() == 0, "reset check failed");
-    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
-  }
-}
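An example of the direct size-to-index mapping the comment above refers to:

// A 17-word free chunk lives on _indexedFreeList[17]; chunks of
// IndexSetSize words or more go to the dictionary instead (see reset()
// below). On platforms where IndexSetStride is 2, the odd elements stay
// permanently empty, as noted above.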
-
-size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
-  return adjustObjectSize(oop(addr)->size());
-}
-
-void CompactibleFreeListSpace::resetIndexedFreeListArray() {
-  for (size_t i = 1; i < IndexSetSize; i++) {
-    assert(_indexedFreeList[i].size() == (size_t) i,
-      "Indexed free list sizes are incorrect");
-    _indexedFreeList[i].reset(IndexSetSize);
-    assert(_indexedFreeList[i].count() == 0, "reset check failed");
-    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
-    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
-  }
-}
-
-void CompactibleFreeListSpace::reset(MemRegion mr) {
-  resetIndexedFreeListArray();
-  dictionary()->reset();
-  if (BlockOffsetArrayUseUnallocatedBlock) {
-    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
-    // Everything's allocated until proven otherwise.
-    _bt.set_unallocated_block(end());
-  }
-  if (!mr.is_empty()) {
-    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
-    _bt.single_block(mr.start(), mr.word_size());
-    FreeChunk* fc = (FreeChunk*) mr.start();
-    fc->set_size(mr.word_size());
-    if (mr.word_size() >= IndexSetSize ) {
-      returnChunkToDictionary(fc);
-    } else {
-      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
-    }
-    coalBirth(mr.word_size());
-  }
-  _promoInfo.reset();
-  _smallLinearAllocBlock._ptr = NULL;
-  _smallLinearAllocBlock._word_size = 0;
-}
-
-void CompactibleFreeListSpace::reset_after_compaction() {
-  // Reset the space to the new reality - one free chunk.
-  MemRegion mr(compaction_top(), end());
-  reset(mr);
-  // Now refill the linear allocation block(s) if possible.
-  refillLinearAllocBlocksIfNeeded();
-}
-
-// Walks the entire dictionary, returning a coterminal
-// chunk, if it exists. Use with caution since it involves
-// a potentially complete walk of a potentially large tree.
-FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
-
-  assert_lock_strong(&_freelistLock);
-
-  return dictionary()->find_chunk_ends_at(end());
-}
-
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
-  }
-}
-
-size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
-  size_t sum = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
-  }
-  return sum;
-}
-
-size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
-  size_t count = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
-    debug_only(
-      ssize_t total_list_count = 0;
-      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-         fc = fc->next()) {
-        total_list_count++;
-      }
-      assert(total_list_count ==  _indexedFreeList[i].count(),
-        "Count in list is incorrect");
-    )
-    count += _indexedFreeList[i].count();
-  }
-  return count;
-}
-
-size_t CompactibleFreeListSpace::totalCount() {
-  size_t num = totalCountInIndexedFreeLists();
-  num +=  dictionary()->total_count();
-  if (_smallLinearAllocBlock._word_size != 0) {
-    num++;
-  }
-  return num;
-}
-#endif
-
-bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*) p;
-  return fc->is_free();
-}
-
-size_t CompactibleFreeListSpace::used() const {
-  return capacity() - free();
-}
-
-size_t CompactibleFreeListSpace::used_stable() const {
-  return _used_stable;
-}
-
-void CompactibleFreeListSpace::recalculate_used_stable() {
-  _used_stable = used();
-}
-
-size_t CompactibleFreeListSpace::free() const {
-  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
-  // if you do this while the structures are in flux you
-  // may get an approximate answer only; for instance
-  // because there is concurrent allocation either
-  // directly by mutators or for promotion during a GC.
-  // It's "MT-safe", however, in the sense that you are guaranteed
-  // not to crash and burn, for instance, because of walking
-  // pointers that could disappear as you were walking them.
-  // The approximation is because the various components
-  // that are read below are not read atomically (and
-  // further the computation of totalSizeInIndexedFreeLists()
-  // is itself a non-atomic computation). The normal use of
-  // this is during a resize operation at the end of GC
-  // and at that time you are guaranteed to get the
-  // correct actual value. However, for instance, this is
-  // also read completely asynchronously by the "perf-sampler"
-  // that supports jvmstat, and you are apt to see the values
-  // flicker in such cases.
-  assert(_dictionary != NULL, "No _dictionary?");
-  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
-          totalSizeInIndexedFreeLists() +
-          _smallLinearAllocBlock._word_size) * HeapWordSize;
-}
-
-size_t CompactibleFreeListSpace::max_alloc_in_words() const {
-  assert(_dictionary != NULL, "No _dictionary?");
-  assert_locked();
-  size_t res = _dictionary->max_chunk_size();
-  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
-                       (size_t) SmallForLinearAlloc - 1));
-  // XXX the following could potentially be pretty slow;
-  // should one, pessimistically for the rare cases when res
-  // calculated above is less than IndexSetSize,
-  // just return res calculated above? My reasoning was that
-  // those cases will be so rare that the extra time spent doesn't
-  // really matter....
-  // Note: do not change the loop test i >= res + IndexSetStride
-  // to i > res below, because i is unsigned and res may be zero.
-  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
-       i -= IndexSetStride) {
-    if (_indexedFreeList[i].head() != NULL) {
-      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
-      return i;
-    }
-  }
-  return res;
-}
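Why the loop above tests i >= res + IndexSetStride rather than i > res: i is unsigned, so stepping past the lower bound wraps instead of going negative. A sketch of the failure the comment warns about:

// With res = 0 and the test "i > res", i must land on 0 exactly to stop;
// if the stride does not divide the starting index (e.g. stride 2 from an
// odd start), i skips over 0 and "i -= IndexSetStride" wraps the unsigned
// value to near SIZE_MAX, walking off the array. Testing
// "i >= res + IndexSetStride" stops the loop before the wrap can happen.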
-
-void LinearAllocBlock::print_on(outputStream* st) const {
-  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
-            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
-            p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
-}
-
-void CompactibleFreeListSpace::print_on(outputStream* st) const {
-  st->print_cr("COMPACTIBLE FREELIST SPACE");
-  st->print_cr(" Space:");
-  Space::print_on(st);
-
-  st->print_cr("promoInfo:");
-  _promoInfo.print_on(st);
-
-  st->print_cr("_smallLinearAllocBlock");
-  _smallLinearAllocBlock.print_on(st);
-
-  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
-
-  st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
-}
-
-void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
-const {
-  reportIndexedFreeListStatistics(st);
-  st->print_cr("Layout of Indexed Freelists");
-  st->print_cr("---------------------------");
-  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    _indexedFreeList[i].print_on(st);
-    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
-      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
-                   p2i(fc), p2i((HeapWord*)fc + i),
-                   fc->cantCoalesce() ? "\t CC" : "");
-    }
-  }
-}
-
-void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
-const {
-  _promoInfo.print_on(st);
-}
-
-void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
-const {
-  _dictionary->report_statistics(st);
-  st->print_cr("Layout of Freelists in Tree");
-  st->print_cr("---------------------------");
-  _dictionary->print_free_lists(st);
-}
-
-class BlkPrintingClosure: public BlkClosure {
-  const CMSCollector*             _collector;
-  const CompactibleFreeListSpace* _sp;
-  const CMSBitMap*                _live_bit_map;
-  const bool                      _post_remark;
-  outputStream*                   _st;
-public:
-  BlkPrintingClosure(const CMSCollector* collector,
-                     const CompactibleFreeListSpace* sp,
-                     const CMSBitMap* live_bit_map,
-                     outputStream* st):
-    _collector(collector),
-    _sp(sp),
-    _live_bit_map(live_bit_map),
-    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
-    _st(st) { }
-  size_t do_blk(HeapWord* addr);
-};
-
-size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
-  size_t sz = _sp->block_size_no_stall(addr, _collector);
-  assert(sz != 0, "Should always be able to compute a size");
-  if (_sp->block_is_obj(addr)) {
-    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
-    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
-      p2i(addr),
-      dead ? "dead" : "live",
-      sz,
-      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
-    if (CMSPrintObjectsInDump && !dead) {
-      oop(addr)->print_on(_st);
-      _st->print_cr("--------------------------------------");
-    }
-  } else { // free block
-    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
-      p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
-    if (CMSPrintChunksInDump) {
-      ((FreeChunk*)addr)->print_on(_st);
-      _st->print_cr("--------------------------------------");
-    }
-  }
-  return sz;
-}
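Sample lines as produced by do_blk() above (addresses and sizes invented):

// 0x00000000fe400000: live object of size 12.
// 0x00000000fe400060: free block of size 42.
// With -XX:+CMSPrintObjectsInDump / -XX:+CMSPrintChunksInDump the trailing
// '.' becomes ':' and the oop or FreeChunk itself is printed underneath.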
-
-void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
-  st->print_cr("=========================");
-  st->print_cr("Block layout in CMS Heap:");
-  st->print_cr("=========================");
-  BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
-  blk_iterate(&bpcl);
-
-  st->print_cr("=======================================");
-  st->print_cr("Order & Layout of Promotion Info Blocks");
-  st->print_cr("=======================================");
-  print_promo_info_blocks(st);
-
-  st->print_cr("===========================");
-  st->print_cr("Order of Indexed Free Lists");
-  st->print_cr("=========================");
-  print_indexed_free_lists(st);
-
-  st->print_cr("=================================");
-  st->print_cr("Order of Free Lists in Dictionary");
-  st->print_cr("=================================");
-  print_dictionary_free_lists(st);
-}
-
-
-void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
-  assert_lock_strong(&_freelistLock);
-  Log(gc, freelist, stats) log;
-  if (!log.is_debug()) {
-    return;
-  }
-  log.debug("%s", title);
-
-  LogStream out(log.debug());
-  _dictionary->report_statistics(&out);
-
-  if (log.is_trace()) {
-    LogStream trace_out(log.trace());
-    reportIndexedFreeListStatistics(&trace_out);
-    size_t total_size = totalSizeInIndexedFreeLists() +
-                       _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
-    log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
-  }
-}
-
-void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
-  assert_lock_strong(&_freelistLock);
-  st->print_cr("Statistics for IndexedFreeLists:");
-  st->print_cr("--------------------------------");
-  size_t total_size = totalSizeInIndexedFreeLists();
-  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
-  st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
-  st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
-  st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
-  if (free_blocks != 0) {
-    st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
-  }
-}
-
-size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
-  size_t res = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    debug_only(
-      ssize_t recount = 0;
-      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-         fc = fc->next()) {
-        recount += 1;
-      }
-      assert(recount == _indexedFreeList[i].count(),
-        "Incorrect count in list");
-    )
-    res += _indexedFreeList[i].count();
-  }
-  return res;
-}
-
-size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
-  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    if (_indexedFreeList[i].head() != NULL) {
-      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
-      return (size_t)i;
-    }
-  }
-  return 0;
-}
-
-void CompactibleFreeListSpace::set_end(HeapWord* value) {
-  HeapWord* prevEnd = end();
-  assert(prevEnd != value, "unnecessary set_end call");
-  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
-        "New end is below unallocated block");
-  _end = value;
-  if (prevEnd != NULL) {
-    // Resize the underlying block offset table.
-    _bt.resize(pointer_delta(value, bottom()));
-    if (value <= prevEnd) {
-      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
-             "New end is below unallocated block");
-    } else {
-      // Now, take this new chunk and add it to the free blocks.
-      // Note that the BOT has not yet been updated for this block.
-      size_t newFcSize = pointer_delta(value, prevEnd);
-      // Add the block to the free lists, if possible coalescing it
-      // with the last free block, and update the BOT and census data.
-      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
-    }
-  }
-}
-
-class FreeListSpaceDCTOC : public FilteringDCTOC {
-  CompactibleFreeListSpace* _cfls;
-  CMSCollector* _collector;
-  bool _parallel;
-protected:
-  // Override.
-#define walk_mem_region_with_cl_DECL(ClosureType)                       \
-  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
-                                       HeapWord* bottom, HeapWord* top, \
-                                       ClosureType* cl);                \
-      void walk_mem_region_with_cl_par(MemRegion mr,                    \
-                                       HeapWord* bottom, HeapWord* top, \
-                                       ClosureType* cl);                \
-    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
-                                       HeapWord* bottom, HeapWord* top, \
-                                       ClosureType* cl)
-  walk_mem_region_with_cl_DECL(OopIterateClosure);
-  walk_mem_region_with_cl_DECL(FilteringClosure);
-
-public:
-  FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
-                     CMSCollector* collector,
-                     OopIterateClosure* cl,
-                     CardTable::PrecisionStyle precision,
-                     HeapWord* boundary,
-                     bool parallel) :
-    FilteringDCTOC(sp, cl, precision, boundary),
-    _cfls(sp), _collector(collector), _parallel(parallel) {}
-};
-
-// We de-virtualize the block-related calls below, since we know that our
-// space is a CompactibleFreeListSpace.
-
-#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
-                                                 HeapWord* bottom,              \
-                                                 HeapWord* top,                 \
-                                                 ClosureType* cl) {             \
-   if (_parallel) {                                                             \
-     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
-   } else {                                                                     \
-     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
-   }                                                                            \
-}                                                                               \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
-                                                     HeapWord* bottom,          \
-                                                     HeapWord* top,             \
-                                                     ClosureType* cl) {         \
-  /* Skip parts that are before "mr", in case "block_start" sent us             \
-     back too far. */                                                           \
-  HeapWord* mr_start = mr.start();                                              \
-  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
-  HeapWord* next = bottom + bot_size;                                           \
-  while (next < mr_start) {                                                     \
-    bottom = next;                                                              \
-    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
-    next = bottom + bot_size;                                                   \
-  }                                                                             \
-                                                                                \
-  while (bottom < top) {                                                        \
-    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
-        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
-                    oop(bottom)) &&                                             \
-        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
-      bottom += _cfls->adjustObjectSize(word_sz);                               \
-    } else {                                                                    \
-      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
-    }                                                                           \
-  }                                                                             \
-}                                                                               \
-void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
-                                                       HeapWord* bottom,        \
-                                                       HeapWord* top,           \
-                                                       ClosureType* cl) {       \
-  /* Skip parts that are before "mr", in case "block_start" sent us             \
-     back too far. */                                                           \
-  HeapWord* mr_start = mr.start();                                              \
-  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
-  HeapWord* next = bottom + bot_size;                                           \
-  while (next < mr_start) {                                                     \
-    bottom = next;                                                              \
-    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
-    next = bottom + bot_size;                                                   \
-  }                                                                             \
-                                                                                \
-  while (bottom < top) {                                                        \
-    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
-        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
-                    oop(bottom)) &&                                             \
-        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
-      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
-      bottom += _cfls->adjustObjectSize(word_sz);                               \
-    } else {                                                                    \
-      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
-    }                                                                           \
-  }                                                                             \
-}
-
-// (There are only two of these, rather than N, because the split is due
-// only to the introduction of the FilteringClosure, a local part of the
-// implementation of this abstraction.)
-FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
-FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
-
-DirtyCardToOopClosure*
-CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
-                                      CardTable::PrecisionStyle precision,
-                                      HeapWord* boundary,
-                                      bool parallel) {
-  return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
-}
-
-
-// Note on locking for the space iteration functions:
-// since the collector's iteration activities are concurrent with
-// allocation activities by mutators, absent a suitable mutual exclusion
-// mechanism the iterators may go awry. For instance a block being iterated
-// may suddenly be allocated or divided up and part of it allocated and
-// so on.
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += cl->do_blk_careful(cur));
-}
-
-// Apply the given closure to each block in the space.
-void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += cl->do_blk(cur));
-}
-
-// Apply the given closure to each oop in the space.
-void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
-  assert_lock_strong(freelistLock());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur)) {
-      oop(cur)->oop_iterate(cl);
-    }
-  }
-}
-
-// NOTE: In the following methods, in order to safely be able to
-// apply the closure to an object, we need to be sure that the
-// object has been initialized. We are guaranteed that an object
-// is initialized if we are holding the Heap_lock with the
-// world stopped.
-void CompactibleFreeListSpace::verify_objects_initialized() const {
-  if (is_init_completed()) {
-    assert_locked_or_safepoint(Heap_lock);
-    if (Universe::is_fully_initialized()) {
-      guarantee(SafepointSynchronize::is_at_safepoint(),
-                "Required for objects to be initialized");
-    }
-  } // else make a concession at vm start-up
-}
-
-// Apply the given closure to each object in the space
-void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
-  assert_lock_strong(freelistLock());
-  NOT_PRODUCT(verify_objects_initialized());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur)) {
-      blk->do_object(oop(cur));
-    }
-  }
-}
-
-// Apply the given closure to each live object in the space.
-//   The use of CompactibleFreeListSpace by ConcurrentMarkSweepGeneration
-// for concurrent GCs allows objects in the space to hold references to
-// objects that are no longer valid.  For example, an object may reference
-// another object that has already been swept up (collected).  This method
-// uses obj_is_alive() to determine whether it is safe to apply the closure
-// to an object.  See obj_is_alive() for details on how the liveness of an
-// object is decided.
-
-void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
-  assert_lock_strong(freelistLock());
-  NOT_PRODUCT(verify_objects_initialized());
-  HeapWord *cur, *limit;
-  size_t curSize;
-  for (cur = bottom(), limit = end(); cur < limit;
-       cur += curSize) {
-    curSize = block_size(cur);
-    if (block_is_obj(cur) && obj_is_alive(cur)) {
-      blk->do_object(oop(cur));
-    }
-  }
-}
-
-void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
-                                                  UpwardsObjectClosure* cl) {
-  assert_locked(freelistLock());
-  NOT_PRODUCT(verify_objects_initialized());
-  assert(!mr.is_empty(), "Should be non-empty");
-  // We use MemRegion(bottom(), end()) rather than used_region() below
-  // because the two are not necessarily equal for some kinds of
-  // spaces, in particular, certain kinds of free list spaces.
-  // We could use the more complicated but more precise:
-  // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
-  // but the slight imprecision seems acceptable in the assertion check.
-  assert(MemRegion(bottom(), end()).contains(mr),
-         "Should be within used space");
-  HeapWord* prev = cl->previous();   // max address from last time
-  if (prev >= mr.end()) { // nothing to do
-    return;
-  }
-  // This assert will not work when we go from cms space to perm
-  // space, and use the same closure. Easy fix deferred for later. XXX YSR
-  // assert(prev == NULL || contains(prev), "Should be within space");
-
-  bool last_was_obj_array = false;
-  HeapWord *blk_start_addr, *region_start_addr;
-  if (prev > mr.start()) {
-    region_start_addr = prev;
-    blk_start_addr    = prev;
-    // The previous invocation may have pushed "prev" beyond the
-    // last allocated block, yet there may still be blocks
-    // in this region due to a particular coalescing policy.
-    // Relax the assertion so that the case where the unallocated
-    // block is maintained and "prev" is beyond the unallocated
-    // block does not cause the assertion to fire.
-    assert((BlockOffsetArrayUseUnallocatedBlock &&
-            (!is_in(prev))) ||
-           (blk_start_addr == block_start(region_start_addr)), "invariant");
-  } else {
-    region_start_addr = mr.start();
-    blk_start_addr    = block_start(region_start_addr);
-  }
-  HeapWord* region_end_addr = mr.end();
-  MemRegion derived_mr(region_start_addr, region_end_addr);
-  while (blk_start_addr < region_end_addr) {
-    const size_t size = block_size(blk_start_addr);
-    if (block_is_obj(blk_start_addr)) {
-      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
-    } else {
-      last_was_obj_array = false;
-    }
-    blk_start_addr += size;
-  }
-  if (!last_was_obj_array) {
-    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
-           "Should be within (closed) used space");
-    assert(blk_start_addr > prev, "Invariant");
-    cl->set_previous(blk_start_addr); // min address for next time
-  }
-}
-
-// Callers of this iterator beware: The closure application should
-// be robust in the face of uninitialized objects and should (always)
-// return a correct size so that the next addr + size below gives us a
-// valid block boundary. [See for instance,
-// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
-// in ConcurrentMarkSweepGeneration.cpp.]
-HeapWord*
-CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
-  ObjectClosureCareful* cl) {
-  assert_lock_strong(freelistLock());
-  // Can't use used_region() below because it may not necessarily
-  // be the same as [bottom(),end()); although we could
-  // use [used_region().start(),align_up(used_region().end(),CardSize)),
-  // that appears too cumbersome, so we just do the simpler check
-  // in the assertion below.
-  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
-         "mr should be non-empty and within used space");
-  HeapWord *addr, *end;
-  size_t size;
-  for (addr = block_start_careful(mr.start()), end  = mr.end();
-       addr < end; addr += size) {
-    FreeChunk* fc = (FreeChunk*)addr;
-    if (fc->is_free()) {
-      // Since we hold the free list lock, which protects direct
-      // allocation in this generation by mutators, a free object
-      // will remain free throughout this iteration code.
-      size = fc->size();
-    } else {
-      // Note that the object need not necessarily be initialized,
-      // because (for instance) the free list lock does NOT protect
-      // object initialization. The closure application below must
-      // therefore be correct in the face of uninitialized objects.
-      size = cl->do_object_careful_m(oop(addr), mr);
-      if (size == 0) {
-        // An unparsable object found. Signal early termination.
-        return addr;
-      }
-    }
-  }
-  return NULL;
-}
-
-
-HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  return _bt.block_start(p);
-}
-
-HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
-  return _bt.block_start_careful(p);
-}
-
-size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  // The chunk read below must go through a volatile pointer, or else
-  // there is a danger that the compiler will compile the code below into
-  // a sometimes-infinite loop, by keeping the value read the first time
-  // in a register.
-  while (true) {
-    // We must do this until we get a consistent view of the object.
-    if (FreeChunk::indicatesFreeChunk(p)) {
-      volatile FreeChunk* fc = (volatile FreeChunk*)p;
-      size_t res = fc->size();
-
-      // Bugfix for systems with weak memory model (PPC64/IA64). The
-      // block's free bit was set and we have read the size of the
-      // block. Acquire and check the free bit again. If the block is
-      // still free, the read size is correct.
-      OrderAccess::acquire();
-
-      // If the object is still a free chunk, return the size, else it
-      // has been allocated so try again.
-      if (FreeChunk::indicatesFreeChunk(p)) {
-        assert(res != 0, "Block size should not be 0");
-        return res;
-      }
-    } else {
-      // Ensure klass read before size.
-      Klass* k = oop(p)->klass_or_null_acquire();
-      if (k != NULL) {
-        assert(k->is_klass(), "Should really be klass oop.");
-        oop o = (oop)p;
-        assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
-
-        size_t res = o->size_given_klass(k);
-        res = adjustObjectSize(res);
-        assert(res != 0, "Block size should not be 0");
-        return res;
-      }
-    }
-  }
-}
-
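-// [Editorial illustration -- not part of the original sources.] The retry
-// loop in block_size() above is a double-check-with-acquire pattern: read a
-// field that a racing thread may concurrently overwrite, fence, then
-// re-check the discriminating free bit; the speculative size read is valid
-// only if the bit is unchanged.  A minimal standalone sketch of the same
-// idea using std::atomic in place of OrderAccess (all names hypothetical):
-//
-//   #include <atomic>
-//   #include <cstddef>
-//
-//   struct Chunk {
-//     std::atomic<bool>        free;
-//     std::atomic<std::size_t> size;
-//   };
-//
-//   // Returns true and sets out only if the chunk stayed free across the read.
-//   inline bool read_size_if_free(const Chunk& c, std::size_t& out) {
-//     if (!c.free.load(std::memory_order_relaxed)) return false;
-//     std::size_t s = c.size.load(std::memory_order_relaxed);
-//     std::atomic_thread_fence(std::memory_order_acquire); // cf. OrderAccess::acquire()
-//     if (!c.free.load(std::memory_order_relaxed)) return false; // re-check bit
-//     out = s;
-//     return true;
-//   }
-//
-// A caller loops, falling back to the klass-based size (as block_size()
-// does) when the chunk was allocated between the two checks.
-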
-// TODO: Now that is_parsable is gone, we should combine these two functions.
-// A variant of the above that uses the Printezis bits for
-// unparsable but allocated objects. This avoids any possible
-// stalls waiting for mutators to initialize objects, and is
-// thus potentially faster than the variant above. However,
-// this variant may return a zero size for a block that is
-// under mutation and for which a consistent size cannot be
-// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
-size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
-                                                     const CMSCollector* c)
-const {
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
-  // The chunk read below must go through a volatile pointer, or else
-  // there is a danger that the compiler will compile the code below into
-  // a sometimes-infinite loop, by keeping the value read the first time
-  // in a register.
-  DEBUG_ONLY(uint loops = 0;)
-  while (true) {
-    // We must do this until we get a consistent view of the object.
-    if (FreeChunk::indicatesFreeChunk(p)) {
-      volatile FreeChunk* fc = (volatile FreeChunk*)p;
-      size_t res = fc->size();
-
-      // Bugfix for systems with weak memory model (PPC64/IA64). The
-      // free bit of the block was set and we have read the size of
-      // the block. Acquire and check the free bit again. If the
-      // block is still free, the read size is correct.
-      OrderAccess::acquire();
-
-      if (FreeChunk::indicatesFreeChunk(p)) {
-        assert(res != 0, "Block size should not be 0");
-        assert(loops == 0, "Should be 0");
-        return res;
-      }
-    } else {
-      // Ensure klass read before size.
-      Klass* k = oop(p)->klass_or_null_acquire();
-      if (k != NULL) {
-        assert(k->is_klass(), "Should really be klass oop.");
-        oop o = (oop)p;
-        assert(oopDesc::is_oop(o), "Should be an oop");
-
-        size_t res = o->size_given_klass(k);
-        res = adjustObjectSize(res);
-        assert(res != 0, "Block size should not be 0");
-        return res;
-      } else {
-        // May return 0 if P-bits not present.
-        return c->block_size_if_printezis_bits(p);
-      }
-    }
-    assert(loops == 0, "Can loop at most once");
-    DEBUG_ONLY(loops++;)
-  }
-}
-
-size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
-  NOT_PRODUCT(verify_objects_initialized());
-  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
-  FreeChunk* fc = (FreeChunk*)p;
-  if (fc->is_free()) {
-    return fc->size();
-  } else {
-    // Ignore mark word because this may be a recently promoted
-    // object whose mark word is used to chain together grey
-    // objects (the last one would have a null value).
-    assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
-    return adjustObjectSize(oop(p)->size());
-  }
-}
-
-// This implementation assumes that the property of "being an object" is
-// stable.  But being a free chunk may not be (because of parallel
-// promotion.)
-bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*)p;
-  assert(is_in_reserved(p), "Should be in space");
-  if (FreeChunk::indicatesFreeChunk(p)) return false;
-  Klass* k = oop(p)->klass_or_null_acquire();
-  if (k != NULL) {
-    // Ignore mark word because it may have been used to
-    // chain together promoted objects (the last one
-    // would have a null value).
-    assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
-    return true;
-  } else {
-    return false;  // Was not an object at the start of collection.
-  }
-}
-
-// Check if the object is alive. This fact is checked either by consulting
-// the main marking bitmap in the sweeping phase or, if it's a permanent
-// generation and we're not in the sweeping phase, by checking the
-// perm_gen_verify_bit_map where we store the "deadness" information if
-// we did not sweep the perm gen in the most recent previous GC cycle.
-bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
-  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
-         "Else races are possible");
-  assert(block_is_obj(p), "The address should point to an object");
-
-  // If we're sweeping, we use object liveness information from the main bit map
-  // for both perm gen and old gen.
-  // We don't need to lock the bitmap (live_map or dead_map below), because
-  // EITHER we are in the middle of the sweeping phase, and the
-  // main marking bit map (live_map below) is locked,
-  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
-  // is stable, because it's mutated only in the sweeping phase.
-  // NOTE: This method is also used by jmap where, if class unloading is
-  // off, the results can return "false" for legitimate perm objects,
-  // when we are not in the midst of a sweeping phase, which can result
-  // in jmap not reporting certain perm gen objects. This will be moot
-  // if/when the perm gen goes away in the future.
-  if (_collector->abstract_state() == CMSCollector::Sweeping) {
-    CMSBitMap* live_map = _collector->markBitMap();
-    return live_map->par_isMarked((HeapWord*) p);
-  }
-  return true;
-}
-
-bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
-  FreeChunk* fc = (FreeChunk*)p;
-  assert(is_in_reserved(p), "Should be in space");
-  assert(_bt.block_start(p) == p, "Should be a block boundary");
-  if (!fc->is_free()) {
-    // Ignore mark word because it may have been used to
-    // chain together promoted objects (the last one
-    // would have a null value).
-    assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
-    return true;
-  }
-  return false;
-}
-
-// "MT-safe but not guaranteed MT-precise" (TM); you may get an
-// approximate answer if you don't hold the freelistLock when you call this.
-size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
-  size_t size = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    debug_only(
-      // We may be calling here without the lock, in which case we
-      // won't do this modest sanity check.
-      if (freelistLock()->owned_by_self()) {
-        size_t total_list_size = 0;
-        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
-          fc = fc->next()) {
-          total_list_size += i;
-        }
-        assert(total_list_size == i * _indexedFreeList[i].count(),
-               "Count in list is incorrect");
-      }
-    )
-    size += i * _indexedFreeList[i].count();
-  }
-  return size;
-}
-
-HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
-  MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
-  return allocate(size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
-  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
-}
-
-HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
-  assert_lock_strong(freelistLock());
-  HeapWord* res = NULL;
-  assert(size == adjustObjectSize(size),
-         "use adjustObjectSize() before calling into allocate()");
-
-  res = allocate_adaptive_freelists(size);
-
-  if (res != NULL) {
-    // check that res does lie in this space!
-    assert(is_in_reserved(res), "Not in this space!");
-    assert(is_aligned((void*)res), "alignment check");
-
-    FreeChunk* fc = (FreeChunk*)res;
-    fc->markNotFree();
-    assert(!fc->is_free(), "shouldn't be marked free");
-    assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
-    // Verify that the block offset table shows this to
-    // be a single block, but not one which is unallocated.
-    _bt.verify_single_block(res, size);
-    _bt.verify_not_unallocated(res, size);
-    // mangle a just allocated object with a distinct pattern.
-    debug_only(fc->mangleAllocated(size));
-  }
-
-  // During GC we do not need to recalculate the stable used value for
-  // every allocation in old gen. It is done once at the end of GC instead
-  // for performance reasons.
-  if (!CMSHeap::heap()->is_gc_active()) {
-    recalculate_used_stable();
-  }
-
-  return res;
-}
-
-HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
-  assert_lock_strong(freelistLock());
-  HeapWord* res = NULL;
-  assert(size == adjustObjectSize(size),
-         "use adjustObjectSize() before calling into allocate()");
-
-  // Strategy
-  //   if small
-  //     exact size from small object indexed list if small
-  //     small or large linear allocation block (linAB) as appropriate
-  //     take from lists of greater sized chunks
-  //   else
-  //     dictionary
-  //     small or large linear allocation block if it has the space
-  // Try allocating exact size from indexTable first
-  if (size < IndexSetSize) {
-    res = (HeapWord*) getChunkFromIndexedFreeList(size);
-    if (res != NULL) {
-      assert(res != (HeapWord*)_indexedFreeList[size].head(),
-        "Not removed from free list");
-      // no block offset table adjustment is necessary on blocks in
-      // the indexed lists.
-
-    // Try allocating from the small LinAB
-    } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
-        (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
-        // if successful, the above also adjusts block offset table
-        // Note that this call will refill the LinAB to
-        // satisfy the request.  This is different than
-        // evm.
-        // Don't record chunk off a LinAB?  smallSplitBirth(size);
-    } else {
-      // Raid the exact free lists larger than size, even if they are not
-      // overpopulated.
-      res = (HeapWord*) getChunkFromGreater(size);
-    }
-  } else {
-    // Big objects get allocated directly from the dictionary.
-    res = (HeapWord*) getChunkFromDictionaryExact(size);
-    if (res == NULL) {
-      // Try hard not to fail since an allocation failure will likely
-      // trigger a synchronous GC.  Try to get the space from the
-      // allocation blocks.
-      res = getChunkFromSmallLinearAllocBlockRemainder(size);
-    }
-  }
-
-  return res;
-}
-
-// A worst-case estimate of the space required (in HeapWords) to expand the heap
-// when promoting obj.
-size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
-  // Depending on the object size, expansion may require refilling either a
-  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
-  // is added because the dictionary may over-allocate to avoid fragmentation.
-  size_t space = obj_size;
-  space += _promoInfo.refillSize() + 2 * MinChunkSize;
-  return space;
-}
-
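-// [Editorial note.] A worked instance of the estimate above, with
-// hypothetical values: for obj_size == 100 words, a PromotionInfo refill
-// size of 1000 words, and MinChunkSize == 4 words, the worst-case
-// expansion estimate is 100 + 1000 + 2*4 = 1108 words.
-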
-FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
-  FreeChunk* ret;
-
-  assert(numWords >= MinChunkSize, "Size is less than minimum");
-  assert(linearAllocationWouldFail() || bestFitFirst(),
-    "Should not be here");
-
-  size_t i;
-  size_t currSize = numWords + MinChunkSize;
-  assert(is_object_aligned(currSize), "currSize should be aligned");
-  for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
-    if (fl->head()) {
-      ret = getFromListGreater(fl, numWords);
-      assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
-      return ret;
-    }
-  }
-
-  currSize = MAX2((size_t)SmallForDictionary,
-                  (size_t)(numWords + MinChunkSize));
-
-  /* Try to get a chunk that satisfies request, while avoiding
-     fragmentation that can't be handled. */
-  {
-    ret =  dictionary()->get_chunk(currSize);
-    if (ret != NULL) {
-      assert(ret->size() - numWords >= MinChunkSize,
-             "Chunk is too small");
-      _bt.allocated((HeapWord*)ret, ret->size());
-      /* Carve returned chunk. */
-      (void) splitChunkAndReturnRemainder(ret, numWords);
-      /* Label this as no longer a free chunk. */
-      assert(ret->is_free(), "This chunk should be free");
-      ret->link_prev(NULL);
-    }
-    assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
-    return ret;
-  }
-  ShouldNotReachHere();
-}
-
-bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
-  assert(fc->size() < IndexSetSize, "Size of chunk is too large");
-  return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
-}
-
-bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
-  assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
-         (_smallLinearAllocBlock._word_size == fc->size()),
-         "Linear allocation block shows incorrect size");
-  return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
-          (_smallLinearAllocBlock._word_size == fc->size()));
-}
-
-// Check if the purported free chunk is present either as a linear
-// allocation block, the size-indexed table of (smaller) free blocks,
-// or the larger free blocks kept in the binary tree dictionary.
-bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
-  if (verify_chunk_is_linear_alloc_block(fc)) {
-    return true;
-  } else if (fc->size() < IndexSetSize) {
-    return verifyChunkInIndexedFreeLists(fc);
-  } else {
-    return dictionary()->verify_chunk_in_free_list(fc);
-  }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::assert_locked() const {
-  CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
-}
-
-void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
-  CMSLockVerifier::assert_locked(lock);
-}
-#endif
-
-FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
-  // In the parallel case, the main thread holds the free list lock
-  // on behalf of the parallel threads.
-  FreeChunk* fc;
-  {
-    // If GC is parallel, this might be called by several threads.
-    // This should be rare enough that the locking overhead won't affect
-    // the sequential code.
-    MutexLocker x(parDictionaryAllocLock(),
-                  Mutex::_no_safepoint_check_flag);
-    fc = getChunkFromDictionary(size);
-  }
-  if (fc != NULL) {
-    fc->dontCoalesce();
-    assert(fc->is_free(), "Should be free, but not coalescable");
-    // Verify that the block offset table shows this to
-    // be a single block, but not one which is unallocated.
-    _bt.verify_single_block((HeapWord*)fc, fc->size());
-    _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-  }
-  return fc;
-}
-
-oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
-  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
-  assert_locked();
-
-  // if we are tracking promotions, then first ensure space for
-  // promotion (including spooling space for saving header if necessary).
-  // then allocate and copy, then track promoted info if needed.
-  // When tracking (see PromotionInfo::track()), the mark word may
-  // be displaced and in this case restoration of the mark word
-  // occurs in the (oop_since_save_marks_)iterate phase.
-  if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
-    return NULL;
-  }
-  // Call the allocate(size_t, bool) form directly to avoid the
-  // additional call through the allocate(size_t) form.  Having
-  // the compiler inline the call is problematic because allocate(size_t)
-  // is a virtual method.
-  HeapWord* res = allocate(adjustObjectSize(obj_size));
-  if (res != NULL) {
-    Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
-    // if we should be tracking promotions, do so.
-    if (_promoInfo.tracking()) {
-        _promoInfo.track((PromotedObject*)res);
-    }
-  }
-  return oop(res);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "minimum chunk size");
-  assert(size <  _smallLinearAllocBlock._allocation_size_limit,
-    "maximum from smallLinearAllocBlock");
-  return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
-}
-
-HeapWord*
-CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
-                                                       size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "too small");
-  HeapWord* res = NULL;
-  // Try to do linear allocation from blk, making sure that the block
-  // has not already been exhausted.
-  if (blk->_word_size == 0) {
-    // We have probably been unable to fill this either in the prologue or
-    // when it was exhausted at the last linear allocation. Bail out until
-    // next time.
-    assert(blk->_ptr == NULL, "consistency check");
-    return NULL;
-  }
-  assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
-  res = getChunkFromLinearAllocBlockRemainder(blk, size);
-  if (res != NULL) return res;
-
-  // about to exhaust this linear allocation block
-  if (blk->_word_size == size) { // exactly satisfied
-    res = blk->_ptr;
-    _bt.allocated(res, blk->_word_size);
-  } else if (size + MinChunkSize <= blk->_refillSize) {
-    size_t sz = blk->_word_size;
-    // Update _unallocated_block if the size is such that chunk would be
-    // returned to the indexed free list.  All other chunks in the indexed
-    // free lists are allocated from the dictionary so that _unallocated_block
-    // has already been adjusted for them.  Do it here so that the cost
-    // is incurred uniformly for all chunks added back to the indexed
-    // free lists.
-    if (sz < SmallForDictionary) {
-      _bt.allocated(blk->_ptr, sz);
-    }
-    // Return the chunk that isn't big enough, and then refill below.
-    addChunkToFreeLists(blk->_ptr, sz);
-    split_birth(sz);
-    // Don't keep statistics on adding back a chunk from a LinAB.
-  } else {
-    // A refilled block would not satisfy the request.
-    return NULL;
-  }
-
-  blk->_ptr = NULL; blk->_word_size = 0;
-  refillLinearAllocBlock(blk);
-  assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
-         "block was replenished");
-  if (res != NULL) {
-    split_birth(size);
-    repairLinearAllocBlock(blk);
-  } else if (blk->_ptr != NULL) {
-    res = blk->_ptr;
-    size_t blk_size = blk->_word_size;
-    blk->_word_size -= size;
-    blk->_ptr  += size;
-    split_birth(size);
-    repairLinearAllocBlock(blk);
-    // Update BOT last so that other (parallel) GC threads see a consistent
-    // view of the BOT and free blocks.
-    // Above must occur before BOT is updated below.
-    OrderAccess::storestore();
-    _bt.split_block(res, blk_size, size);  // adjust block offset table
-  }
-  return res;
-}
-
-HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
-                                        LinearAllocBlock* blk,
-                                        size_t size) {
-  assert_locked();
-  assert(size >= MinChunkSize, "too small");
-
-  HeapWord* res = NULL;
-  // This is the common case.  Keep it simple.
-  if (blk->_word_size >= size + MinChunkSize) {
-    assert(blk->_ptr != NULL, "consistency check");
-    res = blk->_ptr;
-    // Note that the BOT is up-to-date for the linAB before allocation.  It
-    // indicates the start of the linAB.  The split_block() updates the
-    // BOT for the linAB after the allocation (indicates the start of the
-    // next chunk to be allocated).
-    size_t blk_size = blk->_word_size;
-    blk->_word_size -= size;
-    blk->_ptr  += size;
-    split_birth(size);
-    repairLinearAllocBlock(blk);
-    // Update BOT last so that other (parallel) GC threads see a consistent
-    // view of the BOT and free blocks.
-    // Above must occur before BOT is updated below.
-    OrderAccess::storestore();
-    _bt.split_block(res, blk_size, size);  // adjust block offset table
-    _bt.allocated(res, size);
-  }
-  return res;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
-  assert_locked();
-  assert(size < SmallForDictionary, "just checking");
-  FreeChunk* res;
-  res = _indexedFreeList[size].get_chunk_at_head();
-  if (res == NULL) {
-    res = getChunkFromIndexedFreeListHelper(size);
-  }
-  _bt.verify_not_unallocated((HeapWord*) res, size);
-  assert(res == NULL || res->size() == size, "Incorrect block size");
-  return res;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
-  bool replenish) {
-  assert_locked();
-  FreeChunk* fc = NULL;
-  if (size < SmallForDictionary) {
-    assert(_indexedFreeList[size].head() == NULL ||
-      _indexedFreeList[size].surplus() <= 0,
-      "List for this size should be empty or under populated");
-    // Try best fit in exact lists before replenishing the list
-    if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
-      // Replenish list.
-      //
-      // Things tried that failed.
-      //   Tried allocating out of the two LinAB's first before
-      // replenishing lists.
-      //   Tried small linAB of size 256 (size in indexed list)
-      // and replenishing indexed lists from the small linAB.
-      //
-      FreeChunk* newFc = NULL;
-      const size_t replenish_size = CMSIndexedFreeListReplenish * size;
-      if (replenish_size < SmallForDictionary) {
-        // Do not replenish from an underpopulated size.
-        if (_indexedFreeList[replenish_size].surplus() > 0 &&
-            _indexedFreeList[replenish_size].head() != NULL) {
-          newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
-        } else if (bestFitFirst()) {
-          newFc = bestFitSmall(replenish_size);
-        }
-      }
-      if (newFc == NULL && replenish_size > size) {
-        assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
-        newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
-      }
-      // Note: The stats update re the split-death of the block obtained
-      // above is recorded below, precisely when we know we are actually
-      // going to split it into more than one piece.
-      if (newFc != NULL) {
-        if  (replenish || CMSReplenishIntermediate) {
-          // Replenish this list and return one block to caller.
-          size_t i;
-          FreeChunk *curFc, *nextFc;
-          size_t num_blk = newFc->size() / size;
-          assert(num_blk >= 1, "Smaller than requested?");
-          assert(newFc->size() % size == 0, "Should be integral multiple of request");
-          if (num_blk > 1) {
-            // we are sure we will be splitting the block just obtained
-            // into multiple pieces; record the split-death of the original
-            splitDeath(replenish_size);
-          }
-          // carve up and link blocks 0, ..., num_blk - 2
-          // The last chunk is not added to the lists but is returned as the
-          // free chunk.
-          for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
-               i = 0;
-               i < (num_blk - 1);
-               curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
-               i++) {
-            curFc->set_size(size);
-            // Don't record this as a return in order to try and
-            // determine the "returns" from a GC.
-            // Verify the piece being carved off ("fc" is still NULL here).
-            _bt.verify_not_unallocated((HeapWord*) curFc, size);
-            _indexedFreeList[size].return_chunk_at_tail(curFc, false);
-            _bt.mark_block((HeapWord*)curFc, size);
-            split_birth(size);
-            // Don't record the initial population of the indexed list
-            // as a split birth.
-          }
-
-          // check that the arithmetic was OK above
-          assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
-            "inconsistency in carving newFc");
-          curFc->set_size(size);
-          _bt.mark_block((HeapWord*)curFc, size);
-          split_birth(size);
-          fc = curFc;
-        } else {
-          // Return entire block to caller
-          fc = newFc;
-        }
-      }
-    }
-  } else {
-    // Get a free chunk from the free chunk dictionary to be returned to
-    // replenish the indexed free list.
-    fc = getChunkFromDictionaryExact(size);
-  }
-  // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
-  return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
-  assert_locked();
-  FreeChunk* fc = _dictionary->get_chunk(size);
-  if (fc == NULL) {
-    return NULL;
-  }
-  _bt.allocated((HeapWord*)fc, fc->size());
-  if (fc->size() >= size + MinChunkSize) {
-    fc = splitChunkAndReturnRemainder(fc, size);
-  }
-  assert(fc->size() >= size, "chunk too small");
-  assert(fc->size() < size + MinChunkSize, "chunk too big");
-  _bt.verify_single_block((HeapWord*)fc, fc->size());
-  return fc;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
-  assert_locked();
-  FreeChunk* fc = _dictionary->get_chunk(size);
-  if (fc == NULL) {
-    return fc;
-  }
-  _bt.allocated((HeapWord*)fc, fc->size());
-  if (fc->size() == size) {
-    _bt.verify_single_block((HeapWord*)fc, size);
-    return fc;
-  }
-  assert(fc->size() > size, "get_chunk() guarantee");
-  if (fc->size() < size + MinChunkSize) {
-    // Return the chunk to the dictionary and go get a bigger one.
-    returnChunkToDictionary(fc);
-    fc = _dictionary->get_chunk(size + MinChunkSize);
-    if (fc == NULL) {
-      return NULL;
-    }
-    _bt.allocated((HeapWord*)fc, fc->size());
-  }
-  assert(fc->size() >= size + MinChunkSize, "tautology");
-  fc = splitChunkAndReturnRemainder(fc, size);
-  assert(fc->size() == size, "chunk is wrong size");
-  _bt.verify_single_block((HeapWord*)fc, size);
-  return fc;
-}
-
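-// [Editorial note.] The "return and re-request" step above exists because
-// a chunk that exceeds the request by less than MinChunkSize cannot be
-// split: the remainder would be below the minimum chunk size.  With
-// hypothetical numbers, for size == 50 and MinChunkSize == 4, a 52-word
-// chunk is unusable as an exact fit, so it is returned to the dictionary
-// and a chunk of at least 54 words is requested instead; that chunk can
-// then be split into exactly 50 words plus a remainder of at least 4.
-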
-void
-CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
-  assert_locked();
-
-  size_t size = chunk->size();
-  _bt.verify_single_block((HeapWord*)chunk, size);
-  // adjust _unallocated_block downward, as necessary
-  _bt.freed((HeapWord*)chunk, size);
-  _dictionary->return_chunk(chunk);
-#ifndef PRODUCT
-  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
-    TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
-    tl->verify_stats();
-  }
-#endif // PRODUCT
-}
-
-void
-CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
-  assert_locked();
-  size_t size = fc->size();
-  _bt.verify_single_block((HeapWord*) fc, size);
-  _bt.verify_not_unallocated((HeapWord*) fc, size);
-  _indexedFreeList[size].return_chunk_at_tail(fc);
-#ifndef PRODUCT
-  if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-     _indexedFreeList[size].verify_stats();
-  }
-#endif // PRODUCT
-}
-
-// Add the chunk to the free lists, coalescing it with the largest
-// block if the chunk is coterminal with that block, and update BOT
-// and census data. We would of course have preferred to coalesce it
-// with the last block, but it's currently less expensive to find the
-// largest block than it is to find the last.
-void
-CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
-  HeapWord* chunk, size_t     size) {
-  // check that the chunk does lie in this space!
-  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
-  // One of the parallel gc task threads may be here
-  // whilst others are allocating.
-  Mutex* lock = &_parDictionaryAllocLock;
-  FreeChunk* ec;
-  {
-    MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
-    ec = dictionary()->find_largest_dict();  // get largest block
-    if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
-      // It's a coterminal block - we can coalesce.
-      size_t old_size = ec->size();
-      coalDeath(old_size);
-      removeChunkFromDictionary(ec);
-      size += old_size;
-    } else {
-      ec = (FreeChunk*)chunk;
-    }
-  }
-  ec->set_size(size);
-  debug_only(ec->mangleFreed(size));
-  if (size < SmallForDictionary) {
-    lock = _indexedFreeListParLocks[size];
-  }
-  MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
-  addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
-  // Record the birth under the lock, since the recording involves
-  // manipulation of the list on which the chunk lives; if the chunk
-  // were allocated and were the last one on the list, the list
-  // could go away.
-  coalBirth(size);
-}
-
-void
-CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
-                                              size_t     size) {
-  // check that the chunk does lie in this space!
-  assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
-  assert_locked();
-  _bt.verify_single_block(chunk, size);
-
-  FreeChunk* fc = (FreeChunk*) chunk;
-  fc->set_size(size);
-  debug_only(fc->mangleFreed(size));
-  if (size < SmallForDictionary) {
-    returnChunkToFreeList(fc);
-  } else {
-    returnChunkToDictionary(fc);
-  }
-}
-
-void
-CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
-  size_t size, bool coalesced) {
-  assert_locked();
-  assert(chunk != NULL, "null chunk");
-  if (coalesced) {
-    // repair BOT
-    _bt.single_block(chunk, size);
-  }
-  addChunkToFreeLists(chunk, size);
-}
-
-// We _must_ find the purported chunk on our free lists;
-// we assert if we don't.
-void
-CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
-  size_t size = fc->size();
-  assert_locked();
-  debug_only(verifyFreeLists());
-  if (size < SmallForDictionary) {
-    removeChunkFromIndexedFreeList(fc);
-  } else {
-    removeChunkFromDictionary(fc);
-  }
-  _bt.verify_single_block((HeapWord*)fc, size);
-  debug_only(verifyFreeLists());
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
-  size_t size = fc->size();
-  assert_locked();
-  assert(fc != NULL, "null chunk");
-  _bt.verify_single_block((HeapWord*)fc, size);
-  _dictionary->remove_chunk(fc);
-  // adjust _unallocated_block upward, as necessary
-  _bt.allocated((HeapWord*)fc, size);
-}
-
-void
-CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
-  assert_locked();
-  size_t size = fc->size();
-  _bt.verify_single_block((HeapWord*)fc, size);
-  NOT_PRODUCT(
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeList(size);
-    }
-  )
-  _indexedFreeList[size].remove_chunk(fc);
-  NOT_PRODUCT(
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeList(size);
-    }
-  )
-}
-
-FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
-  /* A hint is the next larger size that has a surplus.
-     Start search at a size large enough to guarantee that
-     the excess is >= MinChunkSize. */
-  size_t start = align_object_size(numWords + MinChunkSize);
-  if (start < IndexSetSize) {
-    AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
-    size_t    hint = _indexedFreeList[start].hint();
-    while (hint < IndexSetSize) {
-      assert(is_object_aligned(hint), "hint should be aligned");
-      AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
-      if (fl->surplus() > 0 && fl->head() != NULL) {
-        // Found a list with surplus, reset original hint
-        // and split out a free chunk which is returned.
-        _indexedFreeList[start].set_hint(hint);
-        FreeChunk* res = getFromListGreater(fl, numWords);
-        assert(res == NULL || res->is_free(),
-          "Should be returning a free chunk");
-        return res;
-      }
-      hint = fl->hint(); /* keep looking */
-    }
-    /* None found. */
-    it[start].set_hint(IndexSetSize);
-  }
-  return NULL;
-}
-
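-// [Editorial note.] The hint chain above lets the search skip empty lists:
-// each indexed list's hint names the next larger size last known to have a
-// surplus.  For a hypothetical 10-word request with MinChunkSize == 4, the
-// search starts at align_object_size(14) and follows hints (e.g.
-// 14 -> 20 -> 32) until a list with both a surplus and a non-empty head is
-// found, which is then split via getFromListGreater().
-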
-/* Requires fl->size >= numWords + MinChunkSize */
-FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
-  size_t numWords) {
-  FreeChunk *curr = fl->head();
-  assert(curr != NULL, "List is empty");
-  assert(numWords >= MinChunkSize, "Word size is too small");
-  size_t oldNumWords = curr->size();  // read the size only after the NULL check
-  assert(oldNumWords >= numWords + MinChunkSize,
-        "Size of chunks in the list is too small");
-
-  fl->remove_chunk(curr);
-  // recorded indirectly by splitChunkAndReturnRemainder -
-  // smallSplit(oldNumWords, numWords);
-  FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
-  // Does anything have to be done for the remainder in terms of
-  // fixing the card table?
-  assert(new_chunk == NULL || new_chunk->is_free(),
-    "Should be returning a free chunk");
-  return new_chunk;
-}
-
-FreeChunk*
-CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
-  size_t new_size) {
-  assert_locked();
-  size_t size = chunk->size();
-  assert(size > new_size, "Split from a smaller block?");
-  assert(is_aligned(chunk), "alignment problem");
-  assert(size == adjustObjectSize(size), "alignment problem");
-  size_t rem_sz = size - new_size;
-  assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
-  assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
-  FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
-  assert(is_aligned(ffc), "alignment problem");
-  ffc->set_size(rem_sz);
-  ffc->link_next(NULL);
-  ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-  // Above must occur before BOT is updated below.
-  // adjust block offset table
-  OrderAccess::storestore();
-  assert(chunk->is_free() && ffc->is_free(), "Error");
-  _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
-  if (rem_sz < SmallForDictionary) {
-    // The freeList lock is held, but multiple GC task threads might be executing in parallel.
-    bool is_par = Thread::current()->is_GC_task_thread();
-    if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check();
-    returnChunkToFreeList(ffc);
-    split(size, rem_sz);
-    if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
-  } else {
-    returnChunkToDictionary(ffc);
-    split(size, rem_sz);
-  }
-  chunk->set_size(new_size);
-  return chunk;
-}
-
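-// [Editorial illustration -- not part of the original sources.] The
-// storestore barrier in splitChunkAndReturnRemainder() is the classic
-// initialize-then-publish pattern: the remainder chunk's size and link
-// fields must become visible to other GC threads before the BOT update
-// that lets those threads find the chunk.  A minimal standalone sketch
-// with std::atomic (all names hypothetical):
-//
-//   #include <atomic>
-//   #include <cstddef>
-//
-//   struct Remainder { std::size_t size; Remainder* next; };
-//   std::atomic<Remainder*> g_visible{nullptr};
-//
-//   void publish(Remainder* r, std::size_t sz) {
-//     r->size = sz;       // initialize the fields first...
-//     r->next = nullptr;
-//     // ...then publish: the release store keeps the writes above from
-//     // reordering past the store that makes the object reachable
-//     // (cf. OrderAccess::storestore()).
-//     g_visible.store(r, std::memory_order_release);
-//   }
-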
-void
-CompactibleFreeListSpace::sweep_completed() {
-  // Now that space is probably plentiful, refill linear
-  // allocation blocks as needed.
-  refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_prologue() {
-  assert_locked();
-  reportFreeListStatistics("Before GC:");
-  refillLinearAllocBlocksIfNeeded();
-}
-
-void
-CompactibleFreeListSpace::gc_epilogue() {
-  assert_locked();
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-  _promoInfo.stopTrackingPromotions();
-  repairLinearAllocationBlocks();
-  reportFreeListStatistics("After GC:");
-}
-
-// Iteration support, mostly delegated from a CMS generation
-
-void CompactibleFreeListSpace::save_marks() {
-  assert(Thread::current()->is_VM_thread(),
-         "Global variable should only be set when single-threaded");
-  // Mark the "end" of the used space at the time of this call;
-  // note, however, that promoted objects from this point
-  // on are tracked in the _promoInfo below.
-  set_saved_mark_word(unallocated_block());
-#ifdef ASSERT
-  // Check the sanity of save_marks() etc.
-  MemRegion ur    = used_region();
-  MemRegion urasm = used_region_at_save_marks();
-  assert(ur.contains(urasm),
-         " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
-         " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
-         p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
-#endif
-  // inform allocator that promotions should be tracked.
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-  _promoInfo.startTrackingPromotions();
-}
-
-bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
-  assert(_promoInfo.tracking(), "No preceding save_marks?");
-  return _promoInfo.noPromotions();
-}
-
-bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
-  return _smallLinearAllocBlock._word_size == 0;
-}
-
-void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
-  // Fix up linear allocation blocks to look like free blocks
-  repairLinearAllocBlock(&_smallLinearAllocBlock);
-}
-
-void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
-  assert_locked();
-  if (blk->_ptr != NULL) {
-    assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
-           "Minimum block size requirement");
-    FreeChunk* fc = (FreeChunk*)(blk->_ptr);
-    fc->set_size(blk->_word_size);
-    fc->link_prev(NULL);   // mark as free
-    fc->dontCoalesce();
-    assert(fc->is_free(), "just marked it free");
-    assert(fc->cantCoalesce(), "just marked it uncoalescable");
-  }
-}
-
-void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
-  assert_locked();
-  if (_smallLinearAllocBlock._ptr == NULL) {
-    assert(_smallLinearAllocBlock._word_size == 0,
-      "Size of linAB should be zero if the ptr is NULL");
-    // Reset the linAB refill and allocation size limit.
-    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
-  }
-  refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
-  assert_locked();
-  assert((blk->_ptr == NULL && blk->_word_size == 0) ||
-         (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
-         "blk invariant");
-  if (blk->_ptr == NULL) {
-    refillLinearAllocBlock(blk);
-  }
-}
-
-void
-CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
-  assert_locked();
-  assert(blk->_word_size == 0 && blk->_ptr == NULL,
-         "linear allocation block should be empty");
-  FreeChunk* fc;
-  if (blk->_refillSize < SmallForDictionary &&
-      (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
-    // A linAB's strategy might be to use small sizes to reduce
-    // fragmentation but still get the benefits of allocation from a
-    // linAB.
-  } else {
-    fc = getChunkFromDictionary(blk->_refillSize);
-  }
-  if (fc != NULL) {
-    blk->_ptr  = (HeapWord*)fc;
-    blk->_word_size = fc->size();
-    fc->dontCoalesce();   // to prevent sweeper from sweeping us up
-  }
-}
-
-// Support for compaction
-void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
-  scan_and_forward(this, cp);
-  // prepare_for_compaction() uses the space between live objects
-  // so that the later phases can skip dead space quickly.  As a
-  // result, verification of the free lists doesn't work afterwards.
-}
-
-void CompactibleFreeListSpace::adjust_pointers() {
-  // In other versions of adjust_pointers(), a bail out
-  // based on the amount of live data in the generation
-  // (i.e., if 0, bail out) may be used.
-  // Cannot test used() == 0 here because the free lists have already
-  // been mangled by the compaction.
-
-  scan_and_adjust_pointers(this);
-  // See note about verification in prepare_for_compaction().
-}
-
-void CompactibleFreeListSpace::compact() {
-  scan_and_compact(this);
-}
-
-// Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
-// where fbs are the free block sizes
-double CompactibleFreeListSpace::flsFrag() const {
-  size_t itabFree = totalSizeInIndexedFreeLists();
-  double frag = 0.0;
-  size_t i;
-
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    double sz  = i;
-    frag      += _indexedFreeList[i].count() * (sz * sz);
-  }
-
-  double totFree = itabFree +
-                   _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
-  if (totFree > 0) {
-    frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
-            (totFree * totFree));
-    frag = (double)1.0  - frag;
-  } else {
-    assert(frag == 0.0, "Follows from totFree == 0");
-  }
-  return frag;
-}
-
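-// [Editorial illustration -- not part of the original sources.] A worked
-// example of the metric above: a single 100-word free block gives
-// 1 - 100^2/100^2 = 0 (no fragmentation), while ten 10-word blocks holding
-// the same total free space give 1 - (10 * 10^2)/100^2 = 1 - 0.1 = 0.9;
-// the metric approaches 1 as the free space shatters into small pieces.
-// A standalone sketch of the computation (hypothetical helper):
-//
-//   #include <cstddef>
-//
-//   double fls_frag(const std::size_t* sizes, std::size_t n) {
-//     double sum = 0.0, sum_sq = 0.0;
-//     for (std::size_t i = 0; i < n; i++) {
-//       double s = (double)sizes[i];
-//       sum    += s;
-//       sum_sq += s * s;
-//     }
-//     return (sum > 0.0) ? 1.0 - sum_sq / (sum * sum) : 0.0;
-//   }
-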
-void CompactibleFreeListSpace::beginSweepFLCensus(
-  float inter_sweep_current,
-  float inter_sweep_estimate,
-  float intra_sweep_estimate) {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
-    log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
-    fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
-    fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
-    fl->set_before_sweep(fl->count());
-    fl->set_bfr_surp(fl->surplus());
-  }
-  _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
-                                    inter_sweep_current,
-                                    inter_sweep_estimate,
-                                    intra_sweep_estimate);
-}
-
-void CompactibleFreeListSpace::setFLSurplus() {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    fl->set_surplus(fl->count() -
-                    (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
-  }
-}
-
-void CompactibleFreeListSpace::setFLHints() {
-  assert_locked();
-  size_t i;
-  size_t h = IndexSetSize;
-  for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    fl->set_hint(h);
-    if (fl->surplus() > 0) {
-      h = i;
-    }
-  }
-}
-
-void CompactibleFreeListSpace::clearFLCensus() {
-  assert_locked();
-  size_t i;
-  for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    fl->set_prev_sweep(fl->count());
-    fl->set_coal_births(0);
-    fl->set_coal_deaths(0);
-    fl->set_split_births(0);
-    fl->set_split_deaths(0);
-  }
-}
-
-void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
-  log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
-  setFLSurplus();
-  setFLHints();
-  printFLCensus(sweep_count);
-  clearFLCensus();
-  assert_locked();
-  _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
-}
-
-bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
-  if (size < SmallForDictionary) {
-    AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-    return (fl->coal_desired() < 0) ||
-           ((int)fl->count() > fl->coal_desired());
-  } else {
-    return dictionary()->coal_dict_over_populated(size);
-  }
-}
-
-void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_coal_births();
-  fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_coal_deaths();
-  fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::coalBirth(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallCoalBirth(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   false /* split */,
-                                   true /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::coalDeath(size_t size) {
-  if (size < SmallForDictionary) {
-    smallCoalDeath(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   false /* split */,
-                                   false /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_split_births();
-  fl->increment_surplus();
-}
-
-void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
-  assert(size < SmallForDictionary, "Size too large for indexed list");
-  AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
-  fl->increment_split_deaths();
-  fl->decrement_surplus();
-}
-
-void CompactibleFreeListSpace::split_birth(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallSplitBirth(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   true /* split */,
-                                   true /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::splitDeath(size_t size) {
-  if (size  < SmallForDictionary) {
-    smallSplitDeath(size);
-  } else {
-    dictionary()->dict_census_update(size,
-                                   true /* split */,
-                                   false /* birth */);
-  }
-}
-
-void CompactibleFreeListSpace::split(size_t from, size_t to1) {
-  size_t to2 = from - to1;
-  splitDeath(from);
-  split_birth(to1);
-  split_birth(to2);
-}
-
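-// [Editorial note.] Census accounting for a split: carving a 20-word chunk
-// to satisfy a 12-word request (hypothetical numbers) records one split
-// death at size 20 and split births at sizes 12 and 8, keeping the
-// per-size population statistics consistent with the free lists.
-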
-void CompactibleFreeListSpace::print() const {
-  print_on(tty);
-}
-
-void CompactibleFreeListSpace::prepare_for_verify() {
-  assert_locked();
-  repairLinearAllocationBlocks();
-  // Verify that the SpoolBlocks look like free blocks of
-  // appropriate sizes... To be done ...
-}
-
-class VerifyAllBlksClosure: public BlkClosure {
- private:
-  const CompactibleFreeListSpace* _sp;
-  const MemRegion                 _span;
-  HeapWord*                       _last_addr;
-  size_t                          _last_size;
-  bool                            _last_was_obj;
-  bool                            _last_was_live;
-
- public:
-  VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
-    MemRegion span) :  _sp(sp), _span(span),
-                       _last_addr(NULL), _last_size(0),
-                       _last_was_obj(false), _last_was_live(false) { }
-
-  virtual size_t do_blk(HeapWord* addr) {
-    size_t res;
-    bool   was_obj  = false;
-    bool   was_live = false;
-    if (_sp->block_is_obj(addr)) {
-      was_obj = true;
-      oop p = oop(addr);
-      guarantee(oopDesc::is_oop(p), "Should be an oop");
-      res = _sp->adjustObjectSize(p->size());
-      if (_sp->obj_is_alive(addr)) {
-        was_live = true;
-        oopDesc::verify(p);
-      }
-    } else {
-      FreeChunk* fc = (FreeChunk*)addr;
-      res = fc->size();
-      if (FLSVerifyLists && !fc->cantCoalesce()) {
-        guarantee(_sp->verify_chunk_in_free_list(fc),
-                  "Chunk should be on a free list");
-      }
-    }
-    if (res == 0) {
-      Log(gc, verify) log;
-      log.error("Livelock: no rank reduction!");
-      log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
-                " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
-        p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
-        p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
-      LogStream ls(log.error());
-      _sp->print_on(&ls);
-      guarantee(false, "Verification failed.");
-    }
-    _last_addr = addr;
-    _last_size = res;
-    _last_was_obj  = was_obj;
-    _last_was_live = was_live;
-    return res;
-  }
-};
-
-class VerifyAllOopsClosure: public BasicOopIterateClosure {
- private:
-  const CMSCollector*             _collector;
-  const CompactibleFreeListSpace* _sp;
-  const MemRegion                 _span;
-  const bool                      _past_remark;
-  const CMSBitMap*                _bit_map;
-
- protected:
-  void do_oop(void* p, oop obj) {
-    if (_span.contains(obj)) { // the interior oop points into CMS heap
-      if (!_span.contains(p)) { // reference from outside CMS heap
-        // Should be a valid object; the first disjunct below allows
-        // us to sidestep an assertion in block_is_obj() that insists
-        // that p be in _sp. Note that several generations (and spaces)
-        // are spanned by _span (CMS heap) above.
-        guarantee(!_sp->is_in_reserved(obj) ||
-                  _sp->block_is_obj((HeapWord*)obj),
-                  "Should be an object");
-        guarantee(oopDesc::is_oop(obj), "Should be an oop");
-        oopDesc::verify(obj);
-        if (_past_remark) {
-          // Remark has been completed, the object should be marked
-          guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
-        }
-      } else { // reference within CMS heap
-        if (_past_remark) {
-          // Remark has been completed -- so the referent should have
-          // been marked, if referring object is.
-          if (_bit_map->isMarked(_collector->block_start(p))) {
-            guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
-          }
-        }
-      }
-    } else if (_sp->is_in_reserved(p)) {
-      // the reference is from FLS, and points out of FLS
-      guarantee(oopDesc::is_oop(obj), "Should be an oop");
-      oopDesc::verify(obj);
-    }
-  }
-
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = RawAccess<>::oop_load(p);
-    if (!CompressedOops::is_null(heap_oop)) {
-      oop obj = CompressedOops::decode_not_null(heap_oop);
-      do_oop(p, obj);
-    }
-  }
-
- public:
-  VerifyAllOopsClosure(const CMSCollector* collector,
-    const CompactibleFreeListSpace* sp, MemRegion span,
-    bool past_remark, CMSBitMap* bit_map) :
-    _collector(collector), _sp(sp), _span(span),
-    _past_remark(past_remark), _bit_map(bit_map) { }
-
-  virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
-  virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
-};
-
-void CompactibleFreeListSpace::verify() const {
-  assert_lock_strong(&_freelistLock);
-  verify_objects_initialized();
-  MemRegion span = _collector->_span;
-  bool past_remark = (_collector->abstract_state() ==
-                      CMSCollector::Sweeping);
-
-  ResourceMark rm;
-  HandleMark  hm;
-
-  // Check integrity of CFL data structures
-  _promoInfo.verify();
-  _dictionary->verify();
-  if (FLSVerifyIndexTable) {
-    verifyIndexedFreeLists();
-  }
-  // Check integrity of all objects and free blocks in space
-  {
-    VerifyAllBlksClosure cl(this, span);
-    ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
-  }
-  // Check that all references in the heap to FLS
-  // are to valid objects in FLS or that references in
-  // FLS are to valid objects elsewhere in the heap
-  if (FLSVerifyAllHeapReferences)
-  {
-    VerifyAllOopsClosure cl(_collector, this, span, past_remark,
-      _collector->markBitMap());
-
-    // Iterate over all oops in the heap.
-    CMSHeap::heap()->oop_iterate(&cl);
-  }
-
-  if (VerifyObjectStartArray) {
-    // Verify the block offset table
-    _bt.verify();
-  }
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::verifyFreeLists() const {
-  if (FLSVerifyLists) {
-    _dictionary->verify();
-    verifyIndexedFreeLists();
-  } else {
-    if (FLSVerifyDictionary) {
-      _dictionary->verify();
-    }
-    if (FLSVerifyIndexTable) {
-      verifyIndexedFreeLists();
-    }
-  }
-}
-#endif
-
-void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
-  size_t i = 0;
-  for (; i < IndexSetStart; i++) {
-    guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
-  }
-  for (; i < IndexSetSize; i++) {
-    verifyIndexedFreeList(i);
-  }
-}
-
-void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
-  FreeChunk* fc   =  _indexedFreeList[size].head();
-  FreeChunk* tail =  _indexedFreeList[size].tail();
-  size_t    num = _indexedFreeList[size].count();
-  size_t      n = 0;
-  guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
-            "Slot should have been empty");
-  for (; fc != NULL; fc = fc->next(), n++) {
-    guarantee(fc->size() == size, "Size inconsistency");
-    guarantee(fc->is_free(), "!free?");
-    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
-    guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
-  }
-  guarantee(n == num, "Incorrect count");
-}
-
-#ifndef PRODUCT
-void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
-    "Some sizes can't be allocated without recourse to"
-    " linear allocation buffers");
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
-    "else MIN_TREE_CHUNK_SIZE is wrong");
-  assert(IndexSetStart != 0, "IndexSetStart not initialized");
-  assert(IndexSetStride != 0, "IndexSetStride not initialized");
-}
-#endif
-
-void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
-  assert_lock_strong(&_freelistLock);
-  LogTarget(Debug, gc, freelist, census) log;
-  if (!log.is_enabled()) {
-    return;
-  }
-  AdaptiveFreeList<FreeChunk> total;
-  log.print("end sweep# " SIZE_FORMAT, sweep_count);
-  ResourceMark rm;
-  LogStream ls(log);
-  outputStream* out = &ls;
-  AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
-  size_t total_free = 0;
-  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
-    const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
-    total_free += fl->count() * fl->size();
-    if (i % (40*IndexSetStride) == 0) {
-      AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
-    }
-    fl->print_on(out);
-    total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
-    total.set_surplus(    total.surplus()     + fl->surplus()    );
-    total.set_desired(    total.desired()     + fl->desired()    );
-    total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
-    total.set_before_sweep(total.before_sweep() + fl->before_sweep());
-    total.set_count(      total.count()       + fl->count()      );
-    total.set_coal_births( total.coal_births()  + fl->coal_births() );
-    total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
-    total.set_split_births(total.split_births() + fl->split_births());
-    total.set_split_deaths(total.split_deaths() + fl->split_deaths());
-  }
-  total.print_on(out, "TOTAL");
-  log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
-  log.print("growth: %8.5f  deficit: %8.5f",
-            (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
-                    (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
-            (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
-  _dictionary->print_dict_census(out);
-}
-
-///////////////////////////////////////////////////////////////////////////
-// CompactibleFreeListSpaceLAB
-///////////////////////////////////////////////////////////////////////////
-
-#define VECTOR_257(x)                                                                                  \
-  /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
-  {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
-     x }
-
-// Initialize with default setting for CMS, _not_
-// generic OldPLABSize, whose static default is different; if overridden at the
-// command-line, this will get reinitialized via a call to
-// modify_initialization() below.
-AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
-  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
-size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
-uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
-
-CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
-  _cfls(cfls)
-{
-  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
-  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    _indexedFreeList[i].set_size(i);
-    _num_blocks[i] = 0;
-  }
-}
-
-static bool _CFLS_LAB_modified = false;
-
-void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
-  assert(!_CFLS_LAB_modified, "Call only once");
-  _CFLS_LAB_modified = true;
-  for (size_t i = CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    _blocks_to_claim[i].modify(n, wt, true /* force */);
-  }
-}
-
-HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
-  FreeChunk* res;
-  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
-  if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
-    // This locking manages sync with other large object allocations.
-    MutexLocker x(_cfls->parDictionaryAllocLock(),
-                  Mutex::_no_safepoint_check_flag);
-    res = _cfls->getChunkFromDictionaryExact(word_sz);
-    if (res == NULL) return NULL;
-  } else {
-    AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
-    if (fl->count() == 0) {
-      // Attempt to refill this local free list.
-      get_from_global_pool(word_sz, fl);
-      // If it didn't work, give up.
-      if (fl->count() == 0) return NULL;
-    }
-    res = fl->get_chunk_at_head();
-    assert(res != NULL, "Why was count non-zero?");
-  }
-  res->markNotFree();
-  assert(!res->is_free(), "shouldn't be marked free");
-  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
-  // mangle a just allocated object with a distinct pattern.
-  debug_only(res->mangleAllocated(word_sz));
-  return (HeapWord*)res;
-}
-
-// Get a chunk of blocks of the right size and update related
-// book-keeping stats
-void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
-  // Get the #blocks we want to claim
-  size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
-  assert(n_blks > 0, "Error");
-  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
-  // In some cases, when the application has a phase change,
-  // there may be a sudden and sharp shift in the object survival
-  // profile, and updating the counts at the end of a scavenge
-  // may not be quick enough, giving rise to large scavenge pauses
-  // during these phase changes. It is beneficial to detect such
-  // changes on-the-fly during a scavenge and avoid such a phase-change
-  // pothole. The following code is a heuristic attempt to do that.
-  // It is protected by a product flag until we have gained
-  // enough experience with this heuristic and fine-tuned its behavior.
-  // WARNING: This might increase fragmentation if we overreact to
-  // small spikes, so some kind of historical smoothing based on
-  // previous experience with the greater reactivity might be useful.
-  // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
-  // default.
-  if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
-    //
-    // On a 32-bit VM, the denominator can become zero because of integer overflow,
-    // which is why there is a cast to double.
-    //
-    size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
-    n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
-    n_blks = MIN2(n_blks, CMSOldPLABMax);
-  }
-  assert(n_blks > 0, "Error");
-  _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
-  // Update stats table entry for this block size
-  _num_blocks[word_sz] += fl->count();
-}
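To make the reactivity arithmetic above concrete, the standalone sketch below reproduces only the shape of the computation, with assumed flag values (the real CMSOldPLAB* defaults differ):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  size_t n_blks           = 16;     // _blocks_to_claim[word_sz].average()
  size_t blocks_used      = 4096;   // _num_blocks[word_sz] so far this scavenge
  const double tolerance  = 4.0;    // CMSOldPLABToleranceFactor (assumed value)
  const size_t refills    = 4;      // CMSOldPLABNumRefills (assumed value)
  const size_t reactivity = 2;      // CMSOldPLABReactivityFactor (assumed value)
  const size_t plab_max   = 1024;   // CMSOldPLABMax (assumed value)

  // Demand seen so far, in units of tolerated refill batches; the double
  // arithmetic avoids the 32-bit overflow noted in the comment above.
  size_t multiple = (size_t)(blocks_used / (tolerance * refills * n_blks));
  n_blks += reactivity * multiple * n_blks;  // react to the demand spike
  n_blks = std::min(n_blks, plab_max);       // but never claim above the cap
  std::printf("multiple=%zu n_blks=%zu\n", multiple, n_blks);  // 16, 528
  return 0;
}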
-
-void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
-  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
-           "Counter inconsistency");
-    if (_global_num_workers[i] > 0) {
-      // Need to smooth wrt historical average
-      if (ResizeOldPLAB) {
-        _blocks_to_claim[i].sample(
-          MAX2(CMSOldPLABMin,
-          MIN2(CMSOldPLABMax,
-               _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
-      }
-      // Reset counters for next round
-      _global_num_workers[i] = 0;
-      _global_num_blocks[i] = 0;
-      log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
-    }
-  }
-}
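The resize rule above is a clamped average: blocks consumed per worker per refill, bounded below and above by the PLAB size flags. A small sketch, with the bounds as assumed stand-ins for CMSOldPLABMin/CMSOldPLABMax:

#include <algorithm>
#include <cstddef>
#include <cstdio>

// claim ~= global_blocks / workers / refills, clamped to [lo, hi]
size_t desired_claim(size_t global_blocks, size_t workers,
                     size_t refills, size_t lo, size_t hi) {
  return std::max(lo, std::min(hi, global_blocks / workers / refills));
}

int main() {
  // 12000 blocks consumed by 8 workers over 4 refills -> 375 per claim,
  // which lies inside the assumed [16, 1024] bounds.
  std::printf("%zu\n", desired_claim(12000, 8, 4, 16, 1024));
  return 0;
}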
-
-// If this is changed in the future to allow parallel
-// access, one would need to take the FL locks and,
-// depending on how it is used, stagger access from
-// parallel threads to reduce contention.
-void CompactibleFreeListSpaceLAB::retire(int tid) {
-  // We run this single threaded with the world stopped;
-  // so no need for locks and such.
-  NOT_PRODUCT(Thread* t = Thread::current();)
-  assert(Thread::current()->is_VM_thread(), "Error");
-  for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
-       i < CompactibleFreeListSpace::IndexSetSize;
-       i += CompactibleFreeListSpace::IndexSetStride) {
-    assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
-           "Can't retire more than what we obtained");
-    if (_num_blocks[i] > 0) {
-      size_t num_retire =  _indexedFreeList[i].count();
-      assert(_num_blocks[i] > num_retire, "Should have used at least one");
-      {
-        // MutexLocker x(_cfls->_indexedFreeListParLocks[i],
-        //               Mutex::_no_safepoint_check_flag);
-
-        // Update globals stats for num_blocks used
-        _global_num_blocks[i] += (_num_blocks[i] - num_retire);
-        _global_num_workers[i]++;
-        assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
-        if (num_retire > 0) {
-          _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
-          // Reset this list.
-          _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
-          _indexedFreeList[i].set_size(i);
-        }
-      }
-      log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
-                          tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
-      // Reset stats for next round
-      _num_blocks[i]         = 0;
-    }
-  }
-}
-
-// Used by par_get_chunk_of_blocks() for the chunks from the
-// indexed_free_lists.  Looks for a chunk with a size that is a multiple
-// of "word_sz" and, if found, splits it into "word_sz" chunks and adds
-// them to the free list "fl".  "n" is the maximum number of chunks to
-// be added to "fl".
-bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-
-  // We'll try all multiples of word_sz in the indexed set, starting with
-  // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
-  // then try getting a big chunk and splitting it.
-  {
-    bool found;
-    int  k;
-    size_t cur_sz;
-    for (k = 1, cur_sz = k * word_sz, found = false;
-         (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
-         (CMSSplitIndexedFreeListBlocks || k <= 1);
-         k++, cur_sz = k * word_sz) {
-      AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
-      fl_for_cur_sz.set_size(cur_sz);
-      {
-        MutexLocker x(_indexedFreeListParLocks[cur_sz],
-                      Mutex::_no_safepoint_check_flag);
-        AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
-        if (gfl->count() != 0) {
-          // nn is the number of chunks of size cur_sz that
-          // we'd need to split k-ways each, in order to create
-          // "n" chunks of size word_sz each.
-          const size_t nn = MAX2(n/k, (size_t)1);
-          gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
-          found = true;
-          if (k > 1) {
-            // Update split death stats for the cur_sz-size blocks list:
-            // we increment the split death count by the number of blocks
-            // we just took from the cur_sz-size blocks list and which
-            // we will be splitting below.
-            ssize_t deaths = gfl->split_deaths() +
-                             fl_for_cur_sz.count();
-            gfl->set_split_deaths(deaths);
-          }
-        }
-      }
-      // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
-      if (found) {
-        if (k == 1) {
-          fl->prepend(&fl_for_cur_sz);
-        } else {
-          // Divide each block on fl_for_cur_sz up k ways.
-          FreeChunk* fc;
-          while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
-            // Must do this in reverse order, so that anybody attempting to
-            // access the main chunk sees it as a single free block until we
-            // change it.
-            size_t fc_size = fc->size();
-            assert(fc->is_free(), "Error");
-            for (int i = k-1; i >= 0; i--) {
-              FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
-              assert((i != 0) ||
-                        ((fc == ffc) && ffc->is_free() &&
-                         (ffc->size() == k*word_sz) && (fc_size == word_sz)),
-                        "Counting error");
-              ffc->set_size(word_sz);
-              ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-              ffc->link_next(NULL);
-              // Above must occur before BOT is updated below.
-              OrderAccess::storestore();
-              // splitting from the right; after the decrement below, fc_size == i * word_sz
-              _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
-              fc_size -= word_sz;
-              assert(fc_size == i*word_sz, "Error");
-              _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
-              _bt.verify_single_block((HeapWord*)fc, fc_size);
-              _bt.verify_single_block((HeapWord*)ffc, word_sz);
-              // Push this on "fl".
-              fl->return_chunk_at_head(ffc);
-            }
-            // TRAP
-            assert(fl->tail()->next() == NULL, "List invariant.");
-          }
-        }
-        // Update birth stats for this block size.
-        size_t num = fl->count();
-        MutexLocker x(_indexedFreeListParLocks[word_sz],
-                      Mutex::_no_safepoint_check_flag);
-        ssize_t births = _indexedFreeList[word_sz].split_births() + num;
-        _indexedFreeList[word_sz].set_split_births(births);
-        return true;
-      }
-    }
-    return found;
-  }
-}
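The donor schedule above follows nn = MAX2(n/k, 1): a donor of size k * word_sz splits into k chunks, so larger multiples need proportionally fewer donors. A standalone sketch with illustrative sizes:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t word_sz = 8;           // requested chunk size in words (illustrative)
  const size_t n       = 50;          // requested number of chunks (illustrative)
  const size_t index_set_size = 257;  // CompactibleFreeListSpace::IndexSetSize
  for (size_t k = 1; k * word_sz < index_set_size; k++) {
    size_t nn = (n / k > 0) ? n / k : 1;  // nn = MAX2(n/k, 1)
    std::printf("k=%zu cur_sz=%zu donors=%zu\n", k, k * word_sz, nn);
  }
  return 0;
}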
-
-FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
-
-  FreeChunk* fc = NULL;
-  FreeChunk* rem_fc = NULL;
-  size_t rem;
-  {
-    MutexLocker x(parDictionaryAllocLock(),
-                  Mutex::_no_safepoint_check_flag);
-    while (n > 0) {
-      fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
-      if (fc != NULL) {
-        break;
-      } else {
-        n--;
-      }
-    }
-    if (fc == NULL) return NULL;
-    // Otherwise, split up that block.
-    assert((ssize_t)n >= 1, "Control point invariant");
-    assert(fc->is_free(), "Error: should be a free block");
-    _bt.verify_single_block((HeapWord*)fc, fc->size());
-    const size_t nn = fc->size() / word_sz;
-    n = MIN2(nn, n);
-    assert((ssize_t)n >= 1, "Control point invariant");
-    rem = fc->size() - n * word_sz;
-    // If there is a remainder, and it's too small, allocate one fewer.
-    if (rem > 0 && rem < MinChunkSize) {
-      n--; rem += word_sz;
-    }
-    // Note that at this point we may have n == 0.
-    assert((ssize_t)n >= 0, "Control point invariant");
-
-    // If n is 0, the chunk fc that was found is not large
-    // enough to leave a viable remainder.  We are unable to
-    // allocate even one block.  Return fc to the
-    // dictionary and return, leaving "fl" empty.
-    if (n == 0) {
-      returnChunkToDictionary(fc);
-      return NULL;
-    }
-
-    _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
-    dictionary()->dict_census_update(fc->size(),
-                                     true /*split*/,
-                                     false /*birth*/);
-
-    // First return the remainder, if any.
-    // Note that we hold the lock until we decide if we're going to give
-    // back the remainder to the dictionary, since a concurrent allocation
-    // may otherwise see the heap as empty.  (We're willing to take that
-    // hit if the block is a small block.)
-    if (rem > 0) {
-      size_t prefix_size = n * word_sz;
-      rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
-      rem_fc->set_size(rem);
-      rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-      rem_fc->link_next(NULL);
-      // Above must occur before BOT is updated below.
-      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
-      OrderAccess::storestore();
-      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
-      assert(fc->is_free(), "Error");
-      fc->set_size(prefix_size);
-      if (rem >= IndexSetSize) {
-        returnChunkToDictionary(rem_fc);
-        dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
-        rem_fc = NULL;
-      }
-      // Otherwise, return it to the small list below.
-    }
-  }
-  if (rem_fc != NULL) {
-    MutexLocker x(_indexedFreeListParLocks[rem],
-                  Mutex::_no_safepoint_check_flag);
-    _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
-    _indexedFreeList[rem].return_chunk_at_head(rem_fc);
-    smallSplitBirth(rem);
-  }
-  assert(n * word_sz == fc->size(),
-         "Chunk size " SIZE_FORMAT " is not exactly splittable by "
-         SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
-         fc->size(), n, word_sz);
-  return fc;
-}
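The remainder trimming above keeps the leftover viable: if it is nonzero but smaller than the minimum chunk size, one block is handed back to the remainder. A sketch with assumed word counts (MinChunkSize is platform-derived; 7 below is illustrative):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t min_chunk = 7;   // MinChunkSize stand-in (assumed)
  size_t chunk_words = 95;      // size of the chunk found (illustrative)
  size_t word_sz     = 9;       // block size being carved (illustrative)
  size_t n           = 10;      // blocks wanted
  size_t rem = chunk_words - n * word_sz;  // 95 - 90 = 5: below min_chunk
  if (rem > 0 && rem < min_chunk) {
    n--;                                   // allocate one fewer block
    rem += word_sz;                        // 5 + 9 = 14: now viable
  }
  std::printf("n=%zu rem=%zu\n", n, rem);  // n=9 rem=14
  return 0;
}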
-
-void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
-
-  FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
-
-  if (fc == NULL) {
-    return;
-  }
-
-  size_t n = fc->size() / word_sz;
-
-  assert((ssize_t)n > 0, "Consistency");
-  // Now do the splitting up.
-  // Must do this in reverse order, so that anybody attempting to
-  // access the main chunk sees it as a single free block until we
-  // change it.
-  size_t fc_size = n * word_sz;
-  // All but first chunk in this loop
-  for (ssize_t i = n-1; i > 0; i--) {
-    FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
-    ffc->set_size(word_sz);
-    ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
-    ffc->link_next(NULL);
-    // Above must occur before BOT is updated below.
-    OrderAccess::storestore();
-    // splitting from the right; after the decrement below, fc_size == i * word_sz
-    _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
-    fc_size -= word_sz;
-    _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
-    _bt.verify_single_block((HeapWord*)ffc, ffc->size());
-    _bt.verify_single_block((HeapWord*)fc, fc_size);
-    // Push this on "fl".
-    fl->return_chunk_at_head(ffc);
-  }
-  // First chunk
-  assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
-  // The blocks above should show their new sizes before the first block below
-  fc->set_size(word_sz);
-  fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
-  fc->link_next(NULL);
-  _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
-  _bt.verify_single_block((HeapWord*)fc, fc->size());
-  fl->return_chunk_at_head(fc);
-
-  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
-  {
-    // Update the stats for this block size.
-    MutexLocker x(_indexedFreeListParLocks[word_sz],
-                  Mutex::_no_safepoint_check_flag);
-    const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
-    _indexedFreeList[word_sz].set_split_births(births);
-    // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
-    // _indexedFreeList[word_sz].set_surplus(new_surplus);
-  }
-
-  // TRAP
-  assert(fl->tail()->next() == NULL, "List invariant.");
-}
-
-void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
-  assert(fl->count() == 0, "Precondition.");
-  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
-         "Precondition");
-
-  if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
-    // Got it
-    return;
-  }
-
-  // Otherwise, we'll split a block from the dictionary.
-  par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
-}
-
-const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
-  const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
-  return ergo_max;
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel rescan. See CMSParRemarkTask where this is currently used.
-// XXX Need to suitably abstract and generalize this and the next
-// method into one.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_rescan(int n_threads) {
-  // The "size" of each task is fixed according to rescan_task_size.
-  assert(n_threads > 0, "Unexpected n_threads argument");
-  const size_t task_size = rescan_task_size();
-  size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
-  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
-  assert(n_tasks == 0 ||
-         ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
-          (used_region().start() + n_tasks*task_size >= used_region().end())),
-         "n_tasks calculation incorrect");
-  SequentialSubTasksDone* pst = conc_par_seq_tasks();
-  assert(!pst->valid(), "Clobbering existing data?");
-  // Sets the condition for completion of the subtask (how many threads
-  // need to finish in order to be done).
-  pst->set_n_threads(n_threads);
-  pst->set_n_tasks((int)n_tasks);
-}
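The task count above is a ceiling division of the used region by the task size, so the last task covers any partial tail. A minimal check of the same invariant the assertion verifies:

#include <cassert>
#include <cstddef>
#include <cstdio>

size_t n_tasks_for(size_t used_words, size_t task_size) {
  return (used_words + task_size - 1) / task_size;  // ceil(used / task_size)
}

int main() {
  const size_t used = 1000, task = 64;                // illustrative sizes
  size_t n = n_tasks_for(used, task);                 // 16 tasks
  assert((n - 1) * task < used && n * task >= used);  // invariant from above
  std::printf("n_tasks=%zu\n", n);
  return 0;
}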
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
-void
-CompactibleFreeListSpace::
-initialize_sequential_subtasks_for_marking(int n_threads,
-                                           HeapWord* low) {
-  // The "size" of each task is fixed according to rescan_task_size.
-  assert(n_threads > 0, "Unexpected n_threads argument");
-  const size_t task_size = marking_task_size();
-  assert(task_size > CardTable::card_size_in_words &&
-         (task_size % CardTable::card_size_in_words == 0),
-         "Otherwise arithmetic below would be incorrect");
-  MemRegion span = _old_gen->reserved();
-  if (low != NULL) {
-    if (span.contains(low)) {
-      // Align low down to a card boundary so that
-      // we can use block_offset_careful() on span boundaries.
-      HeapWord* aligned_low = align_down(low, CardTable::card_size);
-      // Clip span prefix at aligned_low
-      span = span.intersection(MemRegion(aligned_low, span.end()));
-    } else if (low > span.end()) {
-      span = MemRegion(low, low);  // Null region
-    } // else use entire span
-  }
-  assert(span.is_empty() ||
-         ((uintptr_t)span.start() % CardTable::card_size == 0),
-        "span should start at a card boundary");
-  size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
-  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
-  assert(n_tasks == 0 ||
-         ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
-          (span.start() + n_tasks*task_size >= span.end())),
-         "n_tasks calculation incorrect");
-  SequentialSubTasksDone* pst = conc_par_seq_tasks();
-  assert(!pst->valid(), "Clobbering existing data?");
-  // Sets the condition for completion of the subtask (how many threads
-  // need to finish in order to be done).
-  pst->set_n_threads(n_threads);
-  pst->set_n_tasks((int)n_tasks);
-}
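The span clipping above aligns "low" down to a card boundary before intersecting it with the old generation's reserved span. A sketch with an assumed card size (treat the constant as illustrative, not the real CardTable::card_size):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_size = 512;                 // CardTable::card_size (assumed)
  uintptr_t span_start = 0x10000, span_end = 0x20000;
  uintptr_t low = 0x12345;                         // lies inside the span
  uintptr_t aligned_low = low & ~(card_size - 1);  // align_down(low, card_size)
  uintptr_t start = aligned_low > span_start ? aligned_low : span_start;
  std::printf("clipped span = [0x%lx, 0x%lx)\n",   // [0x12200, 0x20000)
              (unsigned long)start, (unsigned long)span_end);
  return 0;
}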
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,758 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
-#define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
-
-#include "gc/cms/adaptiveFreeList.hpp"
-#include "gc/cms/promotionInfo.hpp"
-#include "gc/shared/blockOffsetTable.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/space.hpp"
-#include "logging/log.hpp"
-#include "memory/binaryTreeDictionary.hpp"
-#include "memory/freeList.hpp"
-
-// Classes in support of keeping track of promotions into a non-Contiguous
-// space, in this case a CompactibleFreeListSpace.
-
-// Forward declarations
-class CMSCollector;
-class CompactibleFreeListSpace;
-class ConcurrentMarkSweepGeneration;
-class BlkClosure;
-class BlkClosureCareful;
-class FreeChunk;
-class UpwardsObjectClosure;
-class ObjectClosureCareful;
-class Klass;
-
-class AFLBinaryTreeDictionary : public BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > {
- public:
-  AFLBinaryTreeDictionary(MemRegion mr)
-      : BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >(mr) {}
-
-  // Find the list with size "size" in the binary tree and update
-  // the statistics in the list according to "split" (chunk was
-  // split or coalesced) and "birth" (chunk was added or removed).
-  void       dict_census_update(size_t size, bool split, bool birth);
-  // Return true if the dictionary is overpopulated (more chunks of
-  // this size than desired) for size "size".
-  bool       coal_dict_over_populated(size_t size);
-  // Methods called at the beginning of a sweep to prepare the
-  // statistics for the sweep.
-  void       begin_sweep_dict_census(double coalSurplusPercent,
-                                     float inter_sweep_current,
-                                     float inter_sweep_estimate,
-                                     float intra_sweep_estimate);
-  // Methods called after the end of a sweep to modify the
-  // statistics for the sweep.
-  void       end_sweep_dict_census(double splitSurplusPercent);
-  // Accessors for statistics
-  void       set_tree_surplus(double splitSurplusPercent);
-  void       set_tree_hints(void);
-  // Reset statistics for all the lists in the tree.
-  void       clear_tree_census(void);
-  // Print the statistics for all the lists in the tree.  Also may
-  // print out summaries.
-  void       print_dict_census(outputStream* st) const;
-};
-
-class LinearAllocBlock {
- public:
-  LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0),
-    _allocation_size_limit(0) {}
-  void set(HeapWord* ptr, size_t word_size, size_t refill_size,
-    size_t allocation_size_limit) {
-    _ptr = ptr;
-    _word_size = word_size;
-    _refillSize = refill_size;
-    _allocation_size_limit = allocation_size_limit;
-  }
-  HeapWord* _ptr;
-  size_t    _word_size;
-  size_t    _refillSize;
-  size_t    _allocation_size_limit;  // Largest size that will be allocated
-
-  void print_on(outputStream* st) const;
-};
-
-// Concrete subclass of CompactibleSpace that implements
-// a free list space, such as used in the concurrent mark sweep
-// generation.
-
-class CompactibleFreeListSpace: public CompactibleSpace {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepGeneration;
-  friend class CMSCollector;
-  // Local alloc buffer for promotion into this space.
-  friend class CompactibleFreeListSpaceLAB;
-  // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space);
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_compact(SpaceType* space);
-  template <typename SpaceType>
-  friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space);
-  template <typename SpaceType>
-  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);
-
-  // "Size" of chunks of work (executed during parallel remark phases
-  // of CMS collection); this probably belongs in CMSCollector, although
-  // it's cached here because it's used in
-  // initialize_sequential_subtasks_for_rescan() which modifies
-  // par_seq_tasks which also lives in Space. XXX
-  const size_t _rescan_task_size;
-  const size_t _marking_task_size;
-
-  // Yet another sequential tasks done structure. This supports
-  // CMS GC, where we have threads dynamically
-  // claiming sub-tasks from a larger parallel task.
-  SequentialSubTasksDone _conc_par_seq_tasks;
-
-  BlockOffsetArrayNonContigSpace _bt;
-
-  CMSCollector* _collector;
-  ConcurrentMarkSweepGeneration* _old_gen;
-
-  // Data structures for free blocks (used during allocation/sweeping)
-
-  // Allocation is done linearly from two different blocks depending on
-  // whether the request is small or large, in an effort to reduce
-  // fragmentation. We assume that any locking for allocation is done
-  // by the containing generation. Thus, none of the methods in this
-  // space are re-entrant.
-  enum SomeConstants {
-    SmallForLinearAlloc = 16,        // size < this then use _sLAB
-    SmallForDictionary  = 257,       // size < this then use _indexedFreeList
-    IndexSetSize        = SmallForDictionary  // keep this odd-sized
-  };
-  static size_t IndexSetStart;
-  static size_t IndexSetStride;
-  static size_t _min_chunk_size_in_bytes;
-
- private:
-  enum FitStrategyOptions {
-    FreeBlockStrategyNone = 0,
-    FreeBlockBestFitFirst
-  };
-
-  PromotionInfo _promoInfo;
-
-  // Helps to impose a global total order on freelistLock ranks;
-  // assumes that CFLSpace's are allocated in global total order
-  static int   _lockRank;
-
-  // A lock protecting the free lists and free blocks;
-  // mutable because of ubiquity of locking even for otherwise const methods
-  mutable Mutex _freelistLock;
-
-  // Locking verifier convenience function
-  void assert_locked() const PRODUCT_RETURN;
-  void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
-
-  // Linear allocation blocks
-  LinearAllocBlock _smallLinearAllocBlock;
-
-  AFLBinaryTreeDictionary* _dictionary;    // Pointer to dictionary for large size blocks
-
-  // Indexed array for small size blocks
-  AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize];
-
-  // Allocation strategy
-  bool _fitStrategy;  // Use best fit strategy
-
-  // This is an address close to the largest free chunk in the heap.
-  // It is currently assumed to be at the end of the heap.  Free
-  // chunks with addresses greater than nearLargestChunk are coalesced
-  // in an effort to maintain a large chunk at the end of the heap.
-  HeapWord*  _nearLargestChunk;
-
-  // Used to keep track of limit of sweep for the space
-  HeapWord* _sweep_limit;
-
-  // Stable value of used().
-  size_t _used_stable;
-
-  // Used to make the young collector update the mod union table
-  MemRegionClosure* _preconsumptionDirtyCardClosure;
-
-  // Support for compacting cms
-  HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
-  HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top);
-
-  // Initialization helpers.
-  void initializeIndexedFreeListArray();
-
-  // Extra stuff to manage promotion parallelism.
-
-  // A lock protecting the dictionary during par promotion allocation.
-  mutable Mutex _parDictionaryAllocLock;
-  Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; }
-
-  // Locks protecting the exact lists during par promotion allocation.
-  Mutex* _indexedFreeListParLocks[IndexSetSize];
-
-  // Attempt to obtain up to "n" blocks of the size "word_sz" (which is
-  // required to be smaller than "IndexSetSize".)  If successful,
-  // adds them to "fl", which is required to be an empty free list.
-  // If the count of "fl" is negative, it's absolute value indicates a
-  // number of free chunks that had been previously "borrowed" from global
-  // list of size "word_sz", and must now be decremented.
-  void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
-  // Used by par_get_chunk_of_blocks() for the chunks from the
-  // indexed_free_lists.
-  bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
-  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
-  // evenly splittable into "n" "word_sz" chunks.  Returns that
-  // evenly splittable chunk.  May split a larger chunk to get the
-  // evenly splittable chunk.
-  FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n);
-
-  // Used by par_get_chunk_of_blocks() for the chunks from the
-  // dictionary.
-  void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl);
-
-  // Allocation helper functions
-  // Allocate using a strategy that takes from the indexed free lists
-  // first.  This allocation strategy assumes a companion sweeping
-  // strategy that attempts to keep the needed number of chunks in each
-  // indexed free lists.
-  HeapWord* allocate_adaptive_freelists(size_t size);
-
-  // Gets a chunk from the linear allocation block (LinAB).  If there
-  // is not enough space in the LinAB, refills it.
-  HeapWord*  getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size);
-  HeapWord*  getChunkFromSmallLinearAllocBlock(size_t size);
-  // Get a chunk from the space remaining in the linear allocation block.  Do
-  // not attempt to refill if the space is not available, return NULL.  Do the
-  // repairs on the linear allocation block as appropriate.
-  HeapWord*  getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size);
-  inline HeapWord*  getChunkFromSmallLinearAllocBlockRemainder(size_t size);
-
-  // Helper function for getChunkFromIndexedFreeList.
-  // Replenish the indexed free list for this "size".  Do not take from an
-  // underpopulated size.
-  FreeChunk*  getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);
-
-  // Get a chunk from the indexed free list.  If the indexed free list
-  // does not have a free chunk, try to replenish the indexed free list
-  // then get the free chunk from the replenished indexed free list.
-  inline FreeChunk* getChunkFromIndexedFreeList(size_t size);
-
-  // The returned chunk may be larger than requested (or null).
-  FreeChunk* getChunkFromDictionary(size_t size);
-  // The returned chunk is the exact size requested (or null).
-  FreeChunk* getChunkFromDictionaryExact(size_t size);
-
-  // Find a chunk in the indexed free list that is the best
-  // fit for size "numWords".
-  FreeChunk* bestFitSmall(size_t numWords);
-  // For free list "fl" of chunks of size > numWords,
-  // remove a chunk, split off a chunk of size numWords
-  // and return it.  The split off remainder is returned to
-  // the free lists.  The old name for getFromListGreater
-  // was lookInListGreater.
-  FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords);
-  // Get a chunk in the indexed free list or dictionary,
-  // by considering a larger chunk and splitting it.
-  FreeChunk* getChunkFromGreater(size_t numWords);
-  //  Verify that the given chunk is in the indexed free lists.
-  bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const;
-  // Remove the specified chunk from the indexed free lists.
-  void       removeChunkFromIndexedFreeList(FreeChunk* fc);
-  // Remove the specified chunk from the dictionary.
-  void       removeChunkFromDictionary(FreeChunk* fc);
-  // Split a free chunk into a smaller free chunk of size "new_size".
-  // Return the smaller free chunk and return the remainder to the
-  // free lists.
-  FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size);
-  // Add a chunk to the free lists.
-  void       addChunkToFreeLists(HeapWord* chunk, size_t size);
-  // Add a chunk to the free lists, preferring to suffix it
-  // to the last free chunk at end of space if possible, and
-  // updating the block census stats as well as block offset table.
-  // Take any locks as appropriate if we are multithreaded.
-  void       addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size);
-  // Add a free chunk to the indexed free lists.
-  void       returnChunkToFreeList(FreeChunk* chunk);
-  // Add a free chunk to the dictionary.
-  void       returnChunkToDictionary(FreeChunk* chunk);
-
-  // Functions for maintaining the linear allocation buffers (LinAB).
-  // Repairing a linear allocation block refers to operations
-  // performed on the remainder of a LinAB after an allocation
-  // has been made from it.
-  void       repairLinearAllocationBlocks();
-  void       repairLinearAllocBlock(LinearAllocBlock* blk);
-  void       refillLinearAllocBlock(LinearAllocBlock* blk);
-  void       refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk);
-  void       refillLinearAllocBlocksIfNeeded();
-
-  void       verify_objects_initialized() const;
-
-  // Statistics reporting helper functions
-  void       reportFreeListStatistics(const char* title) const;
-  void       reportIndexedFreeListStatistics(outputStream* st) const;
-  size_t     maxChunkSizeInIndexedFreeLists() const;
-  size_t     numFreeBlocksInIndexedFreeLists() const;
-  // Accessor
-  HeapWord* unallocated_block() const {
-    if (BlockOffsetArrayUseUnallocatedBlock) {
-      HeapWord* ub = _bt.unallocated_block();
-      assert(ub >= bottom() &&
-             ub <= end(), "space invariant");
-      return ub;
-    } else {
-      return end();
-    }
-  }
-  void freed(HeapWord* start, size_t size) {
-    _bt.freed(start, size);
-  }
-
-  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
-  // See comments for CompactibleSpace for more information.
-  inline HeapWord* scan_limit() const {
-    return end();
-  }
-
-  inline bool scanned_block_is_obj(const HeapWord* addr) const {
-    return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call
-  }
-
-  inline size_t scanned_block_size(const HeapWord* addr) const {
-    return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call
-  }
-
-  inline size_t adjust_obj_size(size_t size) const {
-    return adjustObjectSize(size);
-  }
-
-  inline size_t obj_size(const HeapWord* addr) const;
-
- protected:
-  // Reset the indexed free list to its initial empty condition.
-  void resetIndexedFreeListArray();
-  // Reset to an initial state with a single free block described
-  // by the MemRegion parameter.
-  void reset(MemRegion mr);
-  // Return the total number of words in the indexed free lists.
-  size_t     totalSizeInIndexedFreeLists() const;
-
- public:
-  // Constructor
-  CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr);
-  // Accessors
-  bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; }
-  AFLBinaryTreeDictionary* dictionary() const { return _dictionary; }
-  HeapWord* nearLargestChunk() const { return _nearLargestChunk; }
-  void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; }
-
-  // Set CMS global values.
-  static void set_cms_values();
-
-  // Return the free chunk at the end of the space.  If no such
-  // chunk exists, return NULL.
-  FreeChunk* find_chunk_at_end();
-
-  void set_collector(CMSCollector* collector) { _collector = collector; }
-
-  // Support for parallelization of rescan and marking.
-  const size_t rescan_task_size()  const { return _rescan_task_size;  }
-  const size_t marking_task_size() const { return _marking_task_size; }
-  // Return ergonomic max size for CMSRescanMultiple and CMSConcMarkMultiple.
-  const size_t max_flag_size_for_task_size() const;
-  SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; }
-  void initialize_sequential_subtasks_for_rescan(int n_threads);
-  void initialize_sequential_subtasks_for_marking(int n_threads,
-         HeapWord* low = NULL);
-
-  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
-    return _preconsumptionDirtyCardClosure;
-  }
-
-  void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) {
-    _preconsumptionDirtyCardClosure = cl;
-  }
-
-  // Space enquiries
-  size_t used() const;
-  size_t free() const;
-  size_t max_alloc_in_words() const;
-  // XXX: should have a less conservative used_region() than that of
-  // Space; we could consider keeping track of highest allocated
-  // address and correcting that at each sweep, as the sweeper
-  // goes through the entire allocated part of the generation. We
-  // could also use that information to keep the sweeper from
-  // sweeping more than is necessary. The allocator and sweeper will
-  // of course need to synchronize on this, since the sweeper will
-  // try to bump down the address and the allocator will try to bump it up.
-  // For now, however, we'll just use the default used_region()
-  // which overestimates the region by returning the entire
-  // committed region (this is safe, but inefficient).
-
-  // Returns monotonically increasing stable used space bytes for CMS.
-  // This is required for jstat and other memory monitoring tools
-  // that might otherwise see inconsistent used space values during a garbage
-  // collection, promotion or allocation into compactibleFreeListSpace.
-  // The value returned by this function might be smaller than the
-  // actual value.
-  size_t used_stable() const;
-  // Recalculate and cache the current stable used() value. Only to be called
-  // in places where we can be sure that the result is stable.
-  void recalculate_used_stable();
-
-  // Returns a subregion of the space containing all the objects in
-  // the space.
-  MemRegion used_region() const {
-    return MemRegion(bottom(),
-                     BlockOffsetArrayUseUnallocatedBlock ?
-                     unallocated_block() : end());
-  }
-
-  virtual bool is_free_block(const HeapWord* p) const;
-
-  // Resizing support
-  void set_end(HeapWord* value);  // override
-
-  // Never mangle CompactibleFreeListSpace
-  void mangle_unused_area() {}
-  void mangle_unused_area_complete() {}
-
-  // Mutual exclusion support
-  Mutex* freelistLock() const { return &_freelistLock; }
-
-  // Iteration support
-  void oop_iterate(OopIterateClosure* cl);
-
-  void object_iterate(ObjectClosure* blk);
-  // Apply the closure to each object in the space whose references
-  // point to objects in the heap.  The usage of CompactibleFreeListSpace
-  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
-  // objects in the space with references to objects that are no longer
-  // valid.  For example, an object may reference another object
-  // that has already been sweep up (collected).  This method uses
-  // obj_is_alive() to determine whether it is safe to iterate of
-  // an object.
-  void safe_object_iterate(ObjectClosure* blk);
-
-  // Iterate over all objects that intersect with mr, calling "cl->do_object"
-  // on each.  There is an exception to this: if this closure has already
-  // been invoked on an object, it may skip such objects in some cases.  This is
-  // most likely to happen in an "upwards" (ascending address) iteration of
-  // MemRegions.
-  void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
-
-  // Requires that "mr" be entirely within the space.
-  // Apply "cl->do_object" to all objects that intersect with "mr".
-  // If the iteration encounters an unparseable portion of the region,
-  // terminate the iteration and return the address of the start of the
-  // subregion that isn't done.  Return of "NULL" indicates that the
-  // iteration completed.
-  HeapWord* object_iterate_careful_m(MemRegion mr,
-                                     ObjectClosureCareful* cl);
-
-  // Override: provides a DCTO_CL specific to this kind of space.
-  DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
-                                     CardTable::PrecisionStyle precision,
-                                     HeapWord* boundary,
-                                     bool parallel);
-
-  void blk_iterate(BlkClosure* cl);
-  void blk_iterate_careful(BlkClosureCareful* cl);
-  HeapWord* block_start_const(const void* p) const;
-  HeapWord* block_start_careful(const void* p) const;
-  size_t block_size(const HeapWord* p) const;
-  size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const;
-  bool block_is_obj(const HeapWord* p) const;
-  bool obj_is_alive(const HeapWord* p) const;
-  size_t block_size_nopar(const HeapWord* p) const;
-  bool block_is_obj_nopar(const HeapWord* p) const;
-
-  // Iteration support for promotion
-  void save_marks();
-  bool no_allocs_since_save_marks();
-
-  // Iteration support for sweeping
-  void save_sweep_limit() {
-    _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
-                   unallocated_block() : end();
-    log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT
-                                 "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
-                                 p2i(_sweep_limit), p2i(bottom()), p2i(end()));
-  }
-  NOT_PRODUCT(
-    void clear_sweep_limit() { _sweep_limit = NULL; }
-  )
-  HeapWord* sweep_limit() { return _sweep_limit; }
-
-  // Apply "blk->do_oop" to the addresses of all reference fields in objects
-  // promoted into this generation since the most recent save_marks() call.
-  // Fields in objects allocated by applications of the closure
-  // *are* included in the iteration. Thus, when the iteration completes
-  // there should be no further such objects remaining.
-  template <typename OopClosureType>
-  void oop_since_save_marks_iterate(OopClosureType* blk);
-
-  // Allocation support
-  HeapWord* allocate(size_t size);
-  HeapWord* par_allocate(size_t size);
-
-  oop       promote(oop obj, size_t obj_size);
-  void      gc_prologue();
-  void      gc_epilogue();
-
-  // This call is used by a containing CMS generation / collector
-  // to inform the CFLS space that a sweep has been completed
-  // and that the space can do any related house-keeping functions.
-  void      sweep_completed();
-
-  // For an object in this space, the mark-word's two
-  // LSB's having the value [11] indicates that it has been
-  // promoted since the most recent call to save_marks() on
-  // this generation and has not subsequently been iterated
-  // over (using oop_since_save_marks_iterate() above).
-  // This property holds only for single-threaded collections,
-  // and is typically used for Cheney scans; for MT scavenges,
-  // the property holds for all objects promoted during that
-  // scavenge for the duration of the scavenge and is used
-  // by card-scanning to avoid scanning objects (being) promoted
-  // during that scavenge.
-  bool obj_allocated_since_save_marks(const oop obj) const {
-    assert(is_in_reserved(obj), "Wrong space?");
-    return ((PromotedObject*)obj)->hasPromotedMark();
-  }
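As the comment notes, the tag is carried in the two low bits of the header word. A minimal sketch of testing that [11] pattern (this shows only the documented bit test, not the full PromotedObject encoding):

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t header = 0x7f00a3;            // ends in binary ...0011 (illustrative)
  bool promoted = (header & 0x3) == 0x3;  // both low bits set -> [11] tag
  std::printf("promoted=%s\n", promoted ? "true" : "false");
  return 0;
}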
-
-  // A worst-case estimate of the space required (in HeapWords) to expand the
-  // heap when promoting an obj of size obj_size.
-  size_t expansionSpaceRequired(size_t obj_size) const;
-
-  FreeChunk* allocateScratch(size_t size);
-
-  // Returns true if either the small or large linear allocation buffer is empty.
-  bool       linearAllocationWouldFail() const;
-
-  // Adjust the chunk for the minimum size.  This version is called in
-  // most cases in CompactibleFreeListSpace methods.
-  inline static size_t adjustObjectSize(size_t size) {
-    return align_object_size(MAX2(size, (size_t)MinChunkSize));
-  }
-  // This is a virtual version of adjustObjectSize() that is called
-  // only occasionally when the compaction space changes and the type
-  // of the new compaction space is only known to be CompactibleSpace.
-  size_t adjust_object_size_v(size_t size) const {
-    return adjustObjectSize(size);
-  }
-  // Minimum size of a free block.
-  virtual size_t minimum_free_block_size() const { return MinChunkSize; }
-  void      removeFreeChunkFromFreeLists(FreeChunk* chunk);
-  void      addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size,
-              bool coalesced);
-
-  // Support for compaction.
-  void prepare_for_compaction(CompactPoint* cp);
-  void adjust_pointers();
-  void compact();
-  // Reset the space to reflect the fact that a compaction of the
-  // space has been done.
-  virtual void reset_after_compaction();
-
-  // Debugging support.
-  void print()                            const;
-  void print_on(outputStream* st)         const;
-  void prepare_for_verify();
-  void verify()                           const;
-  void verifyFreeLists()                  const PRODUCT_RETURN;
-  void verifyIndexedFreeLists()           const;
-  void verifyIndexedFreeList(size_t size) const;
-  // Verify that the given chunk is in the free lists:
-  // i.e. either the binary tree dictionary, the indexed free lists
-  // or the linear allocation block.
-  bool verify_chunk_in_free_list(FreeChunk* fc) const;
-  // Verify that the given chunk is the linear allocation block.
-  bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const;
-  // Do some basic checks on the free lists.
-  void check_free_list_consistency()      const PRODUCT_RETURN;
-
-  // Printing support
-  void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
-  void print_indexed_free_lists(outputStream* st) const;
-  void print_dictionary_free_lists(outputStream* st) const;
-  void print_promo_info_blocks(outputStream* st) const;
-
-  NOT_PRODUCT (
-    void initializeIndexedFreeListArrayReturnedBytes();
-    size_t sumIndexedFreeListArrayReturnedBytes();
-    // Return the total number of chunks in the indexed free lists.
-    size_t totalCountInIndexedFreeLists() const;
-    // Return the total number of chunks in the space.
-    size_t totalCount();
-  )
-
-  // The census consists of counts of quantities such as
-  // the current number of free chunks, the number of chunks
-  // created as a result of splitting a larger chunk or
-  // coalescing smaller chunks, etc.  The counts in the
-  // census are used to make decisions on splitting and
-  // coalescing of chunks during the sweep of garbage.
-
-  // Print the statistics for the free lists.
-  void printFLCensus(size_t sweep_count) const;
-
-  // Statistics functions
-  // Initialize census for lists before the sweep.
-  void beginSweepFLCensus(float inter_sweep_current,
-                          float inter_sweep_estimate,
-                          float intra_sweep_estimate);
-  // Set the surplus for each of the free lists.
-  void setFLSurplus();
-  // Set the hint for each of the free lists.
-  void setFLHints();
-  // Clear the census for each of the free lists.
-  void clearFLCensus();
-  // Perform functions for the census after the end of the sweep.
-  void endSweepFLCensus(size_t sweep_count);
-  // Return true if the count of free chunks is greater
-  // than the desired number of free chunks.
-  bool coalOverPopulated(size_t size);
-
-// Record (for each size):
-//
-//   split-births = #chunks added due to splits in (prev-sweep-end,
-//      this-sweep-start)
-//   split-deaths = #chunks removed for splits in (prev-sweep-end,
-//      this-sweep-start)
-//   num-curr     = #chunks at start of this sweep
-//   num-prev     = #chunks at end of previous sweep
-//
-// The above are quantities that are measured. Now define:
-//
-//   num-desired := num-prev + split-births - split-deaths - num-curr
-//
-// Roughly, num-prev + split-births is the supply,
-// split-deaths is demand due to other sizes
-// and num-curr is what we have left.
-//
-// Thus, num-desired is roughly speaking the "legitimate demand"
-// for blocks of this size and what we are striving to reach at the
-// end of the current sweep.
-//
-// For a given list, let num-len be its current population.
-// Define, for a free list of a given size:
-//
-//   coal-overpopulated := num-len >= num-desired * coal-surplus
-// (coal-surplus is set to 1.05, i.e. we allow a little slop when
-// coalescing -- we do not coalesce unless we think that the current
-// supply has exceeded the estimated demand by more than 5%).
-//
-// For the set of sizes in the binary tree, which is neither dense nor
-// closed, it may be the case that for a particular size we have never
-// had, or do not now have, or did not have at the previous sweep,
-// chunks of that size. We need to extend the definition of
-// coal-overpopulated to such sizes as well:
-//
-//   For a chunk in/not in the binary tree, extend coal-overpopulated
-//   defined above to include all sizes as follows:
-//
-//   . a size that is non-existent is coal-overpopulated
-//   . a size that has a num-desired <= 0 as defined above is
-//     coal-overpopulated.
-//
-// Also define, for a chunk heap-offset C and mountain heap-offset M:
-//
-//   close-to-mountain := C >= 0.99 * M
-//
-// Now, the coalescing strategy is:
-//
-//    Coalesce left-hand chunk with right-hand chunk if and
-//    only if:
-//
-//      EITHER
-//        . left-hand chunk is of a size that is coal-overpopulated
-//      OR
-//        . right-hand chunk is close-to-mountain
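The census arithmetic above condenses into a short sketch; the struct and
field names below are invented, and the real bookkeeping lives in the
per-size AdaptiveFreeList statistics:

    #include <cstddef>

    struct SizeCensus {
      size_t num_prev;      // #chunks at end of previous sweep
      size_t num_curr;      // #chunks at start of this sweep
      size_t split_births;  // chunks of this size created by splits
      size_t split_deaths;  // chunks of this size consumed by splits
    };

    // num-desired := num-prev + split-births - split-deaths - num-curr
    long num_desired(const SizeCensus& c) {
      return (long)c.num_prev + (long)c.split_births
           - (long)c.split_deaths - (long)c.num_curr;
    }

    // coal-overpopulated := num-len >= num-desired * coal-surplus, where a
    // non-existent size, or one with num-desired <= 0, counts as overpopulated.
    bool coal_overpopulated(const SizeCensus& c, size_t num_len,
                            double coal_surplus = 1.05) {
      long desired = num_desired(c);
      if (desired <= 0) return true;
      return (double)num_len >= (double)desired * coal_surplus;
    }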
-  void smallCoalBirth(size_t size);
-  void smallCoalDeath(size_t size);
-  void coalBirth(size_t size);
-  void coalDeath(size_t size);
-  void smallSplitBirth(size_t size);
-  void smallSplitDeath(size_t size);
-  void split_birth(size_t size);
-  void splitDeath(size_t size);
-  void split(size_t from, size_t to1);
-
-  double flsFrag() const;
-};
-
-// A parallel-GC-thread-local allocation buffer for allocation into a
-// CompactibleFreeListSpace.
-class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> {
-  // The space that this buffer allocates into.
-  CompactibleFreeListSpace* _cfls;
-
-  // Our local free lists.
-  AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
-
-  // Initialized from a command-line arg.
-
-  // Allocation statistics in support of dynamic adjustment of
-  // #blocks to claim per get_from_global_pool() call below.
-  static AdaptiveWeightedAverage
-                 _blocks_to_claim  [CompactibleFreeListSpace::IndexSetSize];
-  static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
-  static uint   _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
-  size_t        _num_blocks        [CompactibleFreeListSpace::IndexSetSize];
-
-  // Internal work method
-  void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
-
-public:
-  static const int _default_dynamic_old_plab_size = 16;
-  static const int _default_static_old_plab_size  = 50;
-
-  CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls);
-
-  // Allocate and return a block of the given size, or else return NULL.
-  HeapWord* alloc(size_t word_sz);
-
-  // Return any unused portions of the buffer to the global pool.
-  void retire(int tid);
-
-  // Dynamic OldPLABSize sizing
-  static void compute_desired_plab_size();
-  // When the settings are modified from default static initialization
-  static void modify_initialization(size_t n, unsigned wt);
-};
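A hypothetical per-worker loop over this interface might read as follows
(worker_promote_all and the fallback path are invented for illustration):

    void worker_promote_all(CompactibleFreeListSpaceLAB* lab, int tid,
                            const size_t* word_sizes, size_t n) {
      for (size_t i = 0; i < n; i++) {
        HeapWord* p = lab->alloc(word_sizes[i]);  // refills from the global pool
        if (p == NULL) {
          // fall back to a shared, locked allocation path (not shown)
        }
      }
      lab->retire(tid);  // return any unused chunks to the global pool
    }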
-
-size_t PromotionInfo::refillSize() const {
-  const size_t CMSSpoolBlockSize = 256;
-  const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markWord)
-                                   * CMSSpoolBlockSize);
-  return CompactibleFreeListSpace::adjustObjectSize(sz);
-}
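As a worked instance of this computation, assuming 8-byte HeapWords, an
8-byte markWord, and a 16-byte SpoolBlock header (the header size is an
invented figure for illustration):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t HeapWordSize      = 8;   // assumed 64-bit VM
      const size_t SpoolBlockBytes   = 16;  // invented sizeof(SpoolBlock)
      const size_t MarkWordBytes     = 8;   // assumed sizeof(markWord)
      const size_t CMSSpoolBlockSize = 256; // as in refillSize() above
      size_t bytes = SpoolBlockBytes + MarkWordBytes * CMSSpoolBlockSize; // 2064
      size_t words = (bytes + HeapWordSize - 1) / HeapWordSize;           // 258
      std::printf("refill: %zu bytes = %zu HeapWords (before adjustObjectSize)\n",
                  bytes, words);
      return 0;
    }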
-
-#endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
-#define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
-
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/promotionInfo.inline.hpp"
-
-template <typename OopClosureType>
-void CompactibleFreeListSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
-  _promoInfo.promoted_oops_iterate(blk);
-
-  // This also restores any displaced headers and removes the elements from
-  // the iteration set as they are processed, so that we have a clean slate
-  // at the end of the iteration. Note, thus, that if new objects are
-  // promoted as a result of the iteration they are iterated over as well.
-  assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
-}
-
-#endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_INLINE_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8145 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoaderDataGraph.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "code/codeCache.hpp"
-#include "gc/cms/cmsGCStats.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsOopClosures.inline.hpp"
-#include "gc/cms/cmsVMOperations.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/cms/promotionInfo.inline.hpp"
-#include "gc/serial/genMarkSweep.hpp"
-#include "gc/serial/tenuredGeneration.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/cardGeneration.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "gc/shared/collectorCounters.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcPolicyCounters.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/owstTaskTerminator.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/allocation.hpp"
-#include "memory/binaryTreeDictionary.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/padded.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/flags/flagSetting.hpp"
-#include "runtime/globals_extension.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/timer.hpp"
-#include "runtime/vmThread.hpp"
-#include "services/memoryService.hpp"
-#include "services/runtimeService.hpp"
-#include "utilities/align.hpp"
-#include "utilities/stack.inline.hpp"
-#if INCLUDE_JVMCI
-#include "jvmci/jvmci.hpp"
-#endif
-
-// statics
-CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
-bool CMSCollector::_full_gc_requested = false;
-GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
-
-//////////////////////////////////////////////////////////////////
-// In support of CMS/VM thread synchronization
-//////////////////////////////////////////////////////////////////
-// We split use of the CGC_lock into 2 "levels".
-// The low-level locking is of the usual CGC_lock monitor. We introduce
-// a higher level "token" (hereafter "CMS token") built on top of the
-// low level monitor (hereafter "CGC lock").
-// The token-passing protocol gives priority to the VM thread. The
-// CMS-lock doesn't provide any fairness guarantees, but clients
-// should ensure that it is only held for very short, bounded
-// durations.
-//
-// When either of the CMS thread or the VM thread is involved in
-// collection operations during which it does not want the other
-// thread to interfere, it obtains the CMS token.
-//
-// If either thread tries to get the token while the other has
-// it, that thread waits. However, if the VM thread and CMS thread
-// both want the token, then the VM thread gets priority while the
-// CMS thread waits. This ensures, for instance, that the "concurrent"
-// phases of the CMS thread's work do not block out the VM thread
-// for long periods of time as the CMS thread continues to hog
-// the token. (See bug 4616232).
-//
-// The baton-passing functions are, however, controlled by the
-// flags _foregroundGCShouldWait and _foregroundGCIsActive,
-// and here the low-level CMS lock, not the high level token,
-// ensures mutual exclusion.
-//
-// Two important conditions that we have to satisfy:
-// 1. if a thread does a low-level wait on the CMS lock, then it
-//    relinquishes the CMS token if it were holding that token
-//    when it acquired the low-level CMS lock.
-// 2. any low-level notifications on the low-level lock
-//    should only be sent when a thread has relinquished the token.
-//
-// In the absence of either property, we'd have potential deadlock.
-//
-// We protect each of the CMS (concurrent and sequential) phases
-// with the CMS _token_, not the CMS _lock_.
-//
-// The only code protected by CMS lock is the token acquisition code
-// itself, see ConcurrentMarkSweepThread::[de]synchronize(), and the
-// baton-passing code.
-//
-// Unfortunately, I couldn't come up with a good abstraction to factor and
-// hide the naked CGC_lock manipulation in the baton-passing code
-// further below. That's something we should try to do. Also, the proof
-// of correctness of this 2-level locking scheme is far from obvious,
-// and potentially quite slippery. We have an uneasy suspicion, for instance,
-// that there may be a theoretical possibility of delay/starvation in the
-// low-level lock/wait/notify scheme used for the baton-passing because of
-// potential interference with the priority scheme embodied in the
-// CMS-token-passing protocol. See related comments at a CGC_lock->wait()
-// invocation further below and marked with "XXX 20011219YSR".
-// Indeed, as we note elsewhere, this may become yet more slippery
-// in the presence of multiple CMS and/or multiple VM threads. XXX
-
-class CMSTokenSync: public StackObj {
- private:
-  bool _is_cms_thread;
- public:
-  CMSTokenSync(bool is_cms_thread):
-    _is_cms_thread(is_cms_thread) {
-    assert(is_cms_thread == Thread::current()->is_ConcurrentGC_thread(),
-           "Incorrect argument to constructor");
-    ConcurrentMarkSweepThread::synchronize(_is_cms_thread);
-  }
-
-  ~CMSTokenSync() {
-    assert(_is_cms_thread ?
-             ConcurrentMarkSweepThread::cms_thread_has_cms_token() :
-             ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-          "Incorrect state");
-    ConcurrentMarkSweepThread::desynchronize(_is_cms_thread);
-  }
-};
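Usage is purely scoped; a hypothetical phase step might look like this (the
function name is invented):

    void do_concurrent_phase_step() {
      // The constructor synchronizes on the CMS token for the calling thread
      // kind; the destructor releases it at scope exit.
      CMSTokenSync ts(true /* is_cms_thread */);
      // ... work that must not interleave with the VM thread ...
    }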
-
-// Convenience class that does a CMSTokenSync, and then acquires
-// up to three locks.
-class CMSTokenSyncWithLocks: public CMSTokenSync {
- private:
-  // Note: locks are acquired in textual declaration order
-  // and released in the opposite order
-  MutexLocker _locker1, _locker2, _locker3;
- public:
-  CMSTokenSyncWithLocks(bool is_cms_thread, Mutex* mutex1,
-                        Mutex* mutex2 = NULL, Mutex* mutex3 = NULL):
-    CMSTokenSync(is_cms_thread),
-    _locker1(mutex1, Mutex::_no_safepoint_check_flag),
-    _locker2(mutex2, Mutex::_no_safepoint_check_flag),
-    _locker3(mutex3, Mutex::_no_safepoint_check_flag)
-  { }
-};
-
-
-//////////////////////////////////////////////////////////////////
-//  Concurrent Mark-Sweep Generation /////////////////////////////
-//////////////////////////////////////////////////////////////////
-
-NOT_PRODUCT(CompactibleFreeListSpace* debug_cms_space;)
-
-// This struct contains per-thread things necessary to support parallel
-// young-gen collection.
-class CMSParGCThreadState: public CHeapObj<mtGC> {
- public:
-  CompactibleFreeListSpaceLAB lab;
-  PromotionInfo promo;
-
-  // Constructor.
-  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
-    promo.setSpace(cfls);
-  }
-};
-
-ConcurrentMarkSweepGeneration::ConcurrentMarkSweepGeneration(
-     ReservedSpace rs,
-     size_t initial_byte_size,
-     size_t min_byte_size,
-     size_t max_byte_size,
-     CardTableRS* ct) :
-  CardGeneration(rs, initial_byte_size, ct),
-  _dilatation_factor(((double)MinChunkSize)/((double)(CollectedHeap::min_fill_size()))),
-  _did_compact(false)
-{
-  HeapWord* bottom = (HeapWord*) _virtual_space.low();
-  HeapWord* end    = (HeapWord*) _virtual_space.high();
-
-  _direct_allocated_words = 0;
-  NOT_PRODUCT(
-    _numObjectsPromoted = 0;
-    _numWordsPromoted = 0;
-    _numObjectsAllocated = 0;
-    _numWordsAllocated = 0;
-  )
-
-  _cmsSpace = new CompactibleFreeListSpace(_bts, MemRegion(bottom, end));
-  NOT_PRODUCT(debug_cms_space = _cmsSpace;)
-  _cmsSpace->_old_gen = this;
-
-  _gc_stats = new CMSGCStats();
-
-  // Verify the assumption that FreeChunk::_prev and OopDesc::_klass
-  // offsets match. The ability to tell free chunks from objects
-  // depends on this property.
-  debug_only(
-    FreeChunk* junk = NULL;
-    assert(UseCompressedClassPointers ||
-           junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
-           "Offset of FreeChunk::_prev within FreeChunk must match"
-           "  that of OopDesc::_klass within OopDesc");
-  )
-
-  _par_gc_thread_states = NEW_C_HEAP_ARRAY(CMSParGCThreadState*, ParallelGCThreads, mtGC);
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _par_gc_thread_states[i] = new CMSParGCThreadState(cmsSpace());
-  }
-
-  _incremental_collection_failed = false;
-  // The "dilatation_factor" is the expansion that can occur on
-  // account of the fact that the minimum object size in the CMS
-  // generation may be larger than that in, say, a contiguous young
-  //  generation.
-  // Ideally, in the calculation below, we'd compute the dilatation
-  // factor as: MinChunkSize/(promoting_gen's min object size)
-  // Since we do not have such a general query interface for the
-  // promoting generation, we'll instead just use the minimum
-  // object size (which today is a header's worth of space);
-  // note that all arithmetic is in units of HeapWords.
-  assert(MinChunkSize >= CollectedHeap::min_fill_size(), "just checking");
-  assert(_dilatation_factor >= 1.0, "from previous assert");
-
-  initialize_performance_counters(min_byte_size, max_byte_size);
-}
-
-
-// The field "_initiating_occupancy" represents the occupancy percentage
-// at which we trigger a new collection cycle.  Unless explicitly specified
-// via CMSInitiatingOccupancyFraction (argument "io" below), it
-// is calculated by:
-//
-//   Let "f" be MinHeapFreeRatio in
-//
-//    _initiating_occupancy = 100-f +
-//                           f * (CMSTriggerRatio/100)
-//   where CMSTriggerRatio is the argument "tr" below.
-//
-// That is, if we assume the heap is at its desired maximum occupancy at the
-// end of a collection, we let CMSTriggerRatio percent of the (purported) free
-// space be allocated before initiating a new collection cycle.
-//
-void ConcurrentMarkSweepGeneration::init_initiating_occupancy(intx io, uintx tr) {
-  assert(io <= 100 && tr <= 100, "Check the arguments");
-  if (io >= 0) {
-    _initiating_occupancy = (double)io / 100.0;
-  } else {
-    _initiating_occupancy = ((100 - MinHeapFreeRatio) +
-                             (double)(tr * MinHeapFreeRatio) / 100.0)
-                            / 100.0;
-  }
-}
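A self-contained restatement of the formula, with a worked default case (the
sample values MinHeapFreeRatio = 40 and CMSTriggerRatio = 80 are illustrative
only):

    // io < 0 means CMSInitiatingOccupancyFraction was not set explicitly.
    double initiating_occupancy(int io, unsigned tr, unsigned min_heap_free_ratio) {
      if (io >= 0) {
        return io / 100.0;
      }
      return ((100 - min_heap_free_ratio) +
              (double)(tr * min_heap_free_ratio) / 100.0) / 100.0;
    }
    // Example: initiating_occupancy(-1, 80, 40) == (60 + 32) / 100 == 0.92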
-
-void ConcurrentMarkSweepGeneration::ref_processor_init() {
-  assert(collector() != NULL, "no collector");
-  collector()->ref_processor_init();
-}
-
-void CMSCollector::ref_processor_init() {
-  if (_ref_processor == NULL) {
-    // Allocate and initialize a reference processor
-    _ref_processor =
-      new ReferenceProcessor(&_span_based_discoverer,
-                             (ParallelGCThreads > 1) && ParallelRefProcEnabled, // mt processing
-                             ParallelGCThreads,                      // mt processing degree
-                             _cmsGen->refs_discovery_is_mt(),        // mt discovery
-                             MAX2(ConcGCThreads, ParallelGCThreads), // mt discovery degree
-                             _cmsGen->refs_discovery_is_atomic(),    // whether discovery is atomic (false for CMS)
-                             &_is_alive_closure,                     // closure for liveness info
-                             false);                                 // disable adjusting number of processing threads
-    // Initialize the _ref_processor field of CMSGen
-    _cmsGen->set_ref_processor(_ref_processor);
-
-  }
-}
-
-AdaptiveSizePolicy* CMSCollector::size_policy() {
-  return CMSHeap::heap()->size_policy();
-}
-
-void ConcurrentMarkSweepGeneration::initialize_performance_counters(size_t min_old_size,
-                                                                    size_t max_old_size) {
-
-  const char* gen_name = "old";
-  // Generation Counters - generation 1, 1 subspace
-  _gen_counters = new GenerationCounters(gen_name, 1, 1,
-      min_old_size, max_old_size, &_virtual_space);
-
-  _space_counters = new GSpaceCounters(gen_name, 0,
-                                       _virtual_space.reserved_size(),
-                                       this, _gen_counters);
-}
-
-CMSStats::CMSStats(ConcurrentMarkSweepGeneration* cms_gen, unsigned int alpha):
-  _cms_gen(cms_gen)
-{
-  assert(alpha <= 100, "bad value");
-  _saved_alpha = alpha;
-
-  // Initialize the alphas to the bootstrap value of 100.
-  _gc0_alpha = _cms_alpha = 100;
-
-  _cms_begin_time.update();
-  _cms_end_time.update();
-
-  _gc0_duration = 0.0;
-  _gc0_period = 0.0;
-  _gc0_promoted = 0;
-
-  _cms_duration = 0.0;
-  _cms_period = 0.0;
-  _cms_allocated = 0;
-
-  _cms_used_at_gc0_begin = 0;
-  _cms_used_at_gc0_end = 0;
-  _allow_duty_cycle_reduction = false;
-  _valid_bits = 0;
-}
-
-double CMSStats::cms_free_adjustment_factor(size_t free) const {
-  // TBD: CR 6909490
-  return 1.0;
-}
-
-void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
-}
-
-// If promotion failure handling is on use
-// the padded average size of the promotion for each
-// young generation collection.
-double CMSStats::time_until_cms_gen_full() const {
-  size_t cms_free = _cms_gen->cmsSpace()->free();
-  CMSHeap* heap = CMSHeap::heap();
-  size_t expected_promotion = MIN2(heap->young_gen()->capacity(),
-                                   (size_t) _cms_gen->gc_stats()->avg_promoted()->padded_average());
-  if (cms_free > expected_promotion) {
-    // Start a cms collection if there isn't enough space to promote
-    // for the next young collection.  Use the padded average as
-    // a safety factor.
-    cms_free -= expected_promotion;
-
-    // Adjust by the safety factor.
-    double cms_free_dbl = (double)cms_free;
-    double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor) / 100.0;
-    // Apply a further correction factor which tries to adjust
-    // for the recent occurrence of concurrent mode failures.
-    cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
-    cms_free_dbl = cms_free_dbl * cms_adjustment;
-
-    log_trace(gc)("CMSStats::time_until_cms_gen_full: cms_free " SIZE_FORMAT " expected_promotion " SIZE_FORMAT,
-                  cms_free, expected_promotion);
-    log_trace(gc)("  cms_free_dbl %f cms_consumption_rate %f", cms_free_dbl, cms_consumption_rate() + 1.0);
-    // Add 1 in case the consumption rate goes to zero.
-    return cms_free_dbl / (cms_consumption_rate() + 1.0);
-  }
-  return 0.0;
-}
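Numerically, the estimate behaves as in this sketch (inputs and units are
invented; the correction factor, currently 1.0, is omitted):

    double time_until_full(double cms_free, double expected_promotion,
                           double safety_factor_pct, double consumption_rate) {
      if (cms_free <= expected_promotion) return 0.0;
      double headroom = cms_free - expected_promotion;   // e.g. 400 - 50 = 350
      headroom *= (100.0 - safety_factor_pct) / 100.0;   // * 0.90      = 315
      return headroom / (consumption_rate + 1.0);        // 315 / 21    = 15 s
    }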
-
-// Compare the duration of the cms collection to the
-// time remaining before the cms generation is empty.
-// Note that the time from the start of the cms collection
-// to the start of the cms sweep (less than the total
-// duration of the cms collection) can be used.  This
-// has been tried and some applications experienced
-// promotion failures early in execution.  This was
-// possibly because the averages were not accurate
-// enough at the beginning.
-double CMSStats::time_until_cms_start() const {
-  // We add "gc0_period" to the "work" calculation
-  // below because this query is done (mostly) at the
-  // end of a scavenge, so we need to conservatively
-  // account for that much possible delay
-  // in the query so as to avoid concurrent mode failures
-  // due to starting the collection just a wee bit too
-  // late.
-  double work = cms_duration() + gc0_period();
-  double deadline = time_until_cms_gen_full();
-  // If a concurrent mode failure occurred recently, we want to be
-  // more conservative and halve our expected time_until_cms_gen_full()
-  if (work > deadline) {
-    log_develop_trace(gc)("CMSCollector: collect because of anticipated promotion before full %3.7f + %3.7f > %3.7f ",
-                          cms_duration(), gc0_period(), time_until_cms_gen_full());
-    return 0.0;
-  }
-  return work - deadline;
-}
-
-#ifndef PRODUCT
-void CMSStats::print_on(outputStream *st) const {
-  st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
-  st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
-               gc0_duration(), gc0_period(), gc0_promoted());
-  st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
-            cms_duration(), cms_period(), cms_allocated());
-  st->print(",cms_since_beg=%g,cms_since_end=%g",
-            cms_time_since_begin(), cms_time_since_end());
-  st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
-            _cms_used_at_gc0_begin, _cms_used_at_gc0_end);
-
-  if (valid()) {
-    st->print(",promo_rate=%g,cms_alloc_rate=%g",
-              promotion_rate(), cms_allocation_rate());
-    st->print(",cms_consumption_rate=%g,time_until_full=%g",
-              cms_consumption_rate(), time_until_cms_gen_full());
-  }
-  st->cr();
-}
-#endif // #ifndef PRODUCT
-
-CMSCollector::CollectorState CMSCollector::_collectorState =
-                             CMSCollector::Idling;
-bool CMSCollector::_foregroundGCIsActive = false;
-bool CMSCollector::_foregroundGCShouldWait = false;
-
-CMSCollector::CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
-                           CardTableRS*                   ct):
-  _overflow_list(NULL),
-  _conc_workers(NULL),     // may be set later
-  _completed_initialization(false),
-  _collection_count_start(0),
-  _should_unload_classes(CMSClassUnloadingEnabled),
-  _concurrent_cycles_since_last_unload(0),
-  _roots_scanning_options(GenCollectedHeap::SO_None),
-  _verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
-  _verifying(false),
-  _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
-  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
-  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
-  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
-  _cms_start_registered(false),
-  _cmsGen(cmsGen),
-  // Adjust span to cover old (cms) gen
-  _span(cmsGen->reserved()),
-  _ct(ct),
-  _markBitMap(0, Mutex::leaf + 1, "CMS_markBitMap_lock"),
-  _modUnionTable((CardTable::card_shift - LogHeapWordSize),
-                 -1 /* lock-free */, "No_lock" /* dummy */),
-  _restart_addr(NULL),
-  _ser_pmc_preclean_ovflw(0),
-  _ser_pmc_remark_ovflw(0),
-  _par_pmc_remark_ovflw(0),
-  _ser_kac_preclean_ovflw(0),
-  _ser_kac_ovflw(0),
-  _par_kac_ovflw(0),
-#ifndef PRODUCT
-  _num_par_pushes(0),
-#endif
-  _span_based_discoverer(_span),
-  _ref_processor(NULL),    // will be set later
-  // Construct the is_alive_closure with _span & markBitMap
-  _is_alive_closure(_span, &_markBitMap),
-  _modUnionClosurePar(&_modUnionTable),
-  _between_prologue_and_epilogue(false),
-  _abort_preclean(false),
-  _start_sampling(false),
-  _stats(cmsGen),
-  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
-                             // this lock is always acquired without a safepoint check
-                             Monitor::_safepoint_check_never)),
-  _eden_chunk_array(NULL),     // may be set in ctor body
-  _eden_chunk_index(0),        // -- ditto --
-  _eden_chunk_capacity(0),     // -- ditto --
-  _survivor_chunk_array(NULL), // -- ditto --
-  _survivor_chunk_index(0),    // -- ditto --
-  _survivor_chunk_capacity(0), // -- ditto --
-  _survivor_plab_array(NULL)   // -- ditto --
-{
-  // Now expand the span and allocate the collection support structures
-  // (MUT, marking bit map etc.) to cover both generations subject to
-  // collection.
-
-  // For use by dirty card to oop closures.
-  _cmsGen->cmsSpace()->set_collector(this);
-
-  // Allocate MUT and marking bit map
-  {
-    MutexLocker x(_markBitMap.lock(), Mutex::_no_safepoint_check_flag);
-    if (!_markBitMap.allocate(_span)) {
-      log_warning(gc)("Failed to allocate CMS Bit Map");
-      return;
-    }
-    assert(_markBitMap.covers(_span), "_markBitMap inconsistency?");
-  }
-  {
-    _modUnionTable.allocate(_span);
-    assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?");
-  }
-
-  if (!_markStack.allocate(MarkStackSize)) {
-    log_warning(gc)("Failed to allocate CMS Marking Stack");
-    return;
-  }
-
-  // Support for multi-threaded concurrent phases
-  if (CMSConcurrentMTEnabled) {
-    if (FLAG_IS_DEFAULT(ConcGCThreads)) {
-      // just for now
-      FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3) / 4);
-    }
-    if (ConcGCThreads > 1) {
-      _conc_workers = new YieldingFlexibleWorkGang("CMS Thread",
-                                 ConcGCThreads, true);
-      if (_conc_workers == NULL) {
-        log_warning(gc)("GC/CMS: _conc_workers allocation failure: forcing -CMSConcurrentMTEnabled");
-        CMSConcurrentMTEnabled = false;
-      } else {
-        _conc_workers->initialize_workers();
-      }
-    } else {
-      CMSConcurrentMTEnabled = false;
-    }
-  }
-  if (!CMSConcurrentMTEnabled) {
-    ConcGCThreads = 0;
-  } else {
-    // Turn off CMSCleanOnEnter optimization temporarily for
-    // the MT case where it's not fixed yet; see 6178663.
-    CMSCleanOnEnter = false;
-  }
-  assert((_conc_workers != NULL) == (ConcGCThreads > 1),
-         "Inconsistency");
-  log_debug(gc)("ConcGCThreads: %u", ConcGCThreads);
-  log_debug(gc)("ParallelGCThreads: %u", ParallelGCThreads);
-
-  // Parallel task queues; these are shared for the
-  // concurrent and stop-world phases of CMS, but
-  // are not shared with parallel scavenge (ParNew).
-  {
-    uint i;
-    uint num_queues = MAX2(ParallelGCThreads, ConcGCThreads);
-
-    if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled
-         || ParallelRefProcEnabled)
-        && num_queues > 0) {
-      _task_queues = new OopTaskQueueSet(num_queues);
-      if (_task_queues == NULL) {
-        log_warning(gc)("task_queues allocation failure.");
-        return;
-      }
-      typedef Padded<OopTaskQueue> PaddedOopTaskQueue;
-      for (i = 0; i < num_queues; i++) {
-        PaddedOopTaskQueue *q = new PaddedOopTaskQueue();
-        if (q == NULL) {
-          log_warning(gc)("work_queue allocation failure.");
-          return;
-        }
-        _task_queues->register_queue(i, q);
-      }
-      for (i = 0; i < num_queues; i++) {
-        _task_queues->queue(i)->initialize();
-      }
-    }
-  }
-
-  _cmsGen->init_initiating_occupancy(CMSInitiatingOccupancyFraction, CMSTriggerRatio);
-
-  // Clip CMSBootstrapOccupancy between 0 and 100.
-  _bootstrap_occupancy = CMSBootstrapOccupancy / 100.0;
-
-  // Now tell CMS generations the identity of their collector
-  ConcurrentMarkSweepGeneration::set_collector(this);
-
-  // Create & start a CMS thread for this CMS collector
-  _cmsThread = ConcurrentMarkSweepThread::start(this);
-  assert(cmsThread() != NULL, "CMS Thread should have been created");
-  assert(cmsThread()->collector() == this,
-         "CMS Thread should refer to this gen");
-  assert(CGC_lock != NULL, "Where's the CGC_lock?");
-
-  // Support for parallelizing young gen rescan
-  CMSHeap* heap = CMSHeap::heap();
-  _young_gen = heap->young_gen();
-  if (heap->supports_inline_contig_alloc()) {
-    _top_addr = heap->top_addr();
-    _end_addr = heap->end_addr();
-    assert(_young_gen != NULL, "no _young_gen");
-    _eden_chunk_index = 0;
-    _eden_chunk_capacity = (_young_gen->max_capacity() + CMSSamplingGrain) / CMSSamplingGrain;
-    _eden_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, _eden_chunk_capacity, mtGC);
-  }
-
-  // Support for parallelizing survivor space rescan
-  if ((CMSParallelRemarkEnabled && CMSParallelSurvivorRemarkEnabled) || CMSParallelInitialMarkEnabled) {
-    const size_t max_plab_samples =
-      _young_gen->max_survivor_size() / (PLAB::min_size() * HeapWordSize);
-
-    _survivor_plab_array  = NEW_C_HEAP_ARRAY(ChunkArray, ParallelGCThreads, mtGC);
-    _survivor_chunk_array = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
-    _cursor               = NEW_C_HEAP_ARRAY(size_t, ParallelGCThreads, mtGC);
-    _survivor_chunk_capacity = max_plab_samples;
-    for (uint i = 0; i < ParallelGCThreads; i++) {
-      HeapWord** vec = NEW_C_HEAP_ARRAY(HeapWord*, max_plab_samples, mtGC);
-      ChunkArray* cur = ::new (&_survivor_plab_array[i]) ChunkArray(vec, max_plab_samples);
-      assert(cur->end() == 0, "Should be 0");
-      assert(cur->array() == vec, "Should be vec");
-      assert(cur->capacity() == max_plab_samples, "Error");
-    }
-  }
-
-  NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
-  _gc_counters = new CollectorCounters("CMS full collection pauses", 1);
-  _cgc_counters = new CollectorCounters("CMS concurrent cycle pauses", 2);
-  _completed_initialization = true;
-  _inter_sweep_timer.start();  // start of time
-}
-
-const char* ConcurrentMarkSweepGeneration::name() const {
-  return "concurrent mark-sweep generation";
-}
-void ConcurrentMarkSweepGeneration::update_counters() {
-  if (UsePerfData) {
-    _space_counters->update_all();
-    _gen_counters->update_all();
-  }
-}
-
-// this is an optimized version of update_counters(). it takes the
-// used value as a parameter rather than computing it.
-//
-void ConcurrentMarkSweepGeneration::update_counters(size_t used) {
-  if (UsePerfData) {
-    _space_counters->update_used(used);
-    _space_counters->update_capacity();
-    _gen_counters->update_all();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::print() const {
-  Generation::print();
-  cmsSpace()->print();
-}
-
-#ifndef PRODUCT
-void ConcurrentMarkSweepGeneration::print_statistics() {
-  cmsSpace()->printFLCensus(0);
-}
-#endif
-
-size_t
-ConcurrentMarkSweepGeneration::contiguous_available() const {
-  // dld proposes an improvement in precision here. If the committed
-  // part of the space ends in a free block we should add that to
-  // uncommitted size in the calculation below. Will make this
-  // change later, staying with the approximation below for the
-  // time being. -- ysr.
-  return MAX2(_virtual_space.uncommitted_size(), unsafe_max_alloc_nogc());
-}
-
-size_t
-ConcurrentMarkSweepGeneration::unsafe_max_alloc_nogc() const {
-  return _cmsSpace->max_alloc_in_words() * HeapWordSize;
-}
-
-size_t ConcurrentMarkSweepGeneration::used_stable() const {
-  return cmsSpace()->used_stable();
-}
-
-size_t ConcurrentMarkSweepGeneration::max_available() const {
-  return free() + _virtual_space.uncommitted_size();
-}
-
-bool ConcurrentMarkSweepGeneration::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
-  size_t available = max_available();
-  size_t av_promo  = (size_t)gc_stats()->avg_promoted()->padded_average();
-  bool   res = (available >= av_promo) || (available >= max_promotion_in_bytes);
-  log_trace(gc, promotion)("CMS: promo attempt is%s safe: available(" SIZE_FORMAT ") %s av_promo(" SIZE_FORMAT "), max_promo(" SIZE_FORMAT ")",
-                           res? "":" not", available, res? ">=":"<", av_promo, max_promotion_in_bytes);
-  return res;
-}
-
-// At a promotion failure dump information on block layout in heap
-// (cms old generation).
-void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
-  Log(gc, promotion) log;
-  if (log.is_trace()) {
-    LogStream ls(log.trace());
-    cmsSpace()->dump_at_safepoint_with_locks(collector(), &ls);
-  }
-}
-
-void ConcurrentMarkSweepGeneration::reset_after_compaction() {
-  // Clear the promotion information.  These pointers can be adjusted
-  // along with all the other pointers into the heap but
-  // compaction is expected to be a rare event with
-  // a heap using cms so don't do it without seeing the need.
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _par_gc_thread_states[i]->promo.reset();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::compute_new_size() {
-  assert_locked_or_safepoint(Heap_lock);
-
-  // If incremental collection failed, we just want to expand
-  // to the limit.
-  if (incremental_collection_failed()) {
-    clear_incremental_collection_failed();
-    grow_to_reserved();
-    return;
-  }
-
-  // The heap has been compacted but not reset yet.
-  // Any metric such as free() or used() will be incorrect.
-
-  CardGeneration::compute_new_size();
-
-  // Reset again after a possible resizing
-  if (did_compact()) {
-    cmsSpace()->reset_after_compaction();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
-  assert_locked_or_safepoint(Heap_lock);
-
-  // If incremental collection failed, we just want to expand
-  // to the limit.
-  if (incremental_collection_failed()) {
-    clear_incremental_collection_failed();
-    grow_to_reserved();
-    return;
-  }
-
-  double free_percentage = ((double) free()) / capacity();
-  double desired_free_percentage = (double) MinHeapFreeRatio / 100;
-  double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
-
-  // compute expansion delta needed for reaching desired free percentage
-  if (free_percentage < desired_free_percentage) {
-    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-    assert(desired_capacity >= capacity(), "invalid expansion size");
-    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
-    Log(gc) log;
-    if (log.is_trace()) {
-      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-      log.trace("From compute_new_size: ");
-      log.trace("  Free fraction %f", free_percentage);
-      log.trace("  Desired free fraction %f", desired_free_percentage);
-      log.trace("  Maximum free fraction %f", maximum_free_percentage);
-      log.trace("  Capacity " SIZE_FORMAT, capacity() / 1000);
-      log.trace("  Desired capacity " SIZE_FORMAT, desired_capacity / 1000);
-      CMSHeap* heap = CMSHeap::heap();
-      size_t young_size = heap->young_gen()->capacity();
-      log.trace("  Young gen size " SIZE_FORMAT, young_size / 1000);
-      log.trace("  unsafe_max_alloc_nogc " SIZE_FORMAT, unsafe_max_alloc_nogc() / 1000);
-      log.trace("  contiguous available " SIZE_FORMAT, contiguous_available() / 1000);
-      log.trace("  Expand by " SIZE_FORMAT " (bytes)", expand_bytes);
-    }
-    // safe if expansion fails
-    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
-    log.trace("  Expanded free fraction %f", ((double) free()) / capacity());
-  } else {
-    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
-    assert(desired_capacity <= capacity(), "invalid expansion size");
-    size_t shrink_bytes = capacity() - desired_capacity;
-    // Don't shrink unless the delta is greater than the minimum shrink we want
-    if (shrink_bytes >= MinHeapDeltaBytes) {
-      shrink_free_list_by(shrink_bytes);
-    }
-  }
-}
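The expansion arithmetic reduces to the following sketch (names and the
MB-denominated example are invented):

    #include <cstddef>

    // Returns how much to expand by, or 0 on the shrink path; min_delta
    // plays the role of MinHeapDeltaBytes.
    size_t expansion_needed(size_t used, size_t capacity,
                            double desired_free, size_t min_delta) {
      size_t desired_capacity = (size_t)(used / (1.0 - desired_free));
      if (desired_capacity <= capacity) return 0;
      size_t delta = desired_capacity - capacity;
      return delta > min_delta ? delta : min_delta;
    }
    // Example: used = 600, capacity = 700, desired_free = 0.40
    //          -> desired_capacity = 1000, expand by 300 (MB).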
-
-Mutex* ConcurrentMarkSweepGeneration::freelistLock() const {
-  return cmsSpace()->freelistLock();
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::allocate(size_t size, bool tlab) {
-  CMSSynchronousYieldRequest yr;
-  MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
-  return have_lock_and_allocate(size, tlab);
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::have_lock_and_allocate(size_t size,
-                                                                bool   tlab /* ignored */) {
-  assert_lock_strong(freelistLock());
-  size_t adjustedSize = CompactibleFreeListSpace::adjustObjectSize(size);
-  HeapWord* res = cmsSpace()->allocate(adjustedSize);
-  // Allocate the object live (grey) if the background collector has
-  // started marking. This is necessary because the marker may
-  // have passed this address and consequently this object will
-  // not otherwise be greyed and would be incorrectly swept up.
-  // Note that if this object contains references, the writing
-  // of those references will dirty the card containing this object
-  // allowing the object to be blackened (and its references scanned)
-  // either during a preclean phase or at the final checkpoint.
-  if (res != NULL) {
-    // We may block here with an uninitialized object with
-    // its mark-bit or P-bits not yet set. Such objects need
-    // to be safely navigable by block_start().
-    assert(oop(res)->klass_or_null() == NULL, "Object should be uninitialized here.");
-    assert(!((FreeChunk*)res)->is_free(), "Error, block will look free but show wrong size");
-    collector()->direct_allocated(res, adjustedSize);
-    _direct_allocated_words += adjustedSize;
-    // allocation counters
-    NOT_PRODUCT(
-      _numObjectsAllocated++;
-      _numWordsAllocated += (int)adjustedSize;
-    )
-  }
-  return res;
-}
-
-// In the case of direct allocation by mutators in a generation that
-// is being concurrently collected, the object must be allocated
-// live (grey) if the background collector has started marking.
-// This is necessary because the marker may
-// have passed this address and consequently this object will
-// not otherwise be greyed and would be incorrectly swept up.
-// Note that if this object contains references, the writing
-// of those references will dirty the card containing this object
-// allowing the object to be blackened (and its references scanned)
-// either during a preclean phase or at the final checkpoint.
-void CMSCollector::direct_allocated(HeapWord* start, size_t size) {
-  assert(_markBitMap.covers(start, size), "Out of bounds");
-  if (_collectorState >= Marking) {
-    MutexLocker y(_markBitMap.lock(),
-                  Mutex::_no_safepoint_check_flag);
-    // [see comments preceding SweepClosure::do_blk() below for details]
-    //
-    // Can the P-bits be deleted now?  JJJ
-    //
-    // 1. need to mark the object as live so it isn't collected
-    // 2. need to mark the 2nd bit to indicate the object may be uninitialized
-    // 3. need to mark the end of the object so marking, precleaning or sweeping
-    //    can skip over uninitialized or unparsable objects. An allocated
-    //    object is considered uninitialized for our purposes as long as
-    //    its klass word is NULL.  All old gen objects are parsable
-    //    as soon as they are initialized.
-    _markBitMap.mark(start);          // object is live
-    _markBitMap.mark(start + 1);      // object is potentially uninitialized?
-    _markBitMap.mark(start + size - 1);
-                                      // mark end of object
-  }
-  // check that oop looks uninitialized
-  assert(oop(start)->klass_or_null() == NULL, "_klass should be NULL");
-}
-
-void CMSCollector::promoted(bool par, HeapWord* start,
-                            bool is_obj_array, size_t obj_size) {
-  assert(_markBitMap.covers(start), "Out of bounds");
-  // See comment in direct_allocated() about when objects should
-  // be allocated live.
-  if (_collectorState >= Marking) {
-    // we already hold the marking bit map lock, taken in
-    // the prologue
-    if (par) {
-      _markBitMap.par_mark(start);
-    } else {
-      _markBitMap.mark(start);
-    }
-    // We don't need to mark the object as uninitialized (as
-    // in direct_allocated above) because this is being done with the
-    // world stopped and the object will be initialized by the
-    // time the marking, precleaning or sweeping get to look at it.
-    // But see the code for copying objects into the CMS generation,
-    // where we need to ensure that concurrent readers of the
-    // block offset table are able to safely navigate a block that
-    // is in flux from being free to being allocated (and in
-    // transition while being copied into) and subsequently
-    // becoming a bona-fide object when the copy/promotion is complete.
-    assert(SafepointSynchronize::is_at_safepoint(),
-           "expect promotion only at safepoints");
-
-    if (_collectorState < Sweeping) {
-      // Mark the appropriate cards in the modUnionTable, so that
-      // this object gets scanned before the sweep. If this is
-      // not done, CMS generation references in the object might
-      // not get marked.
-      // For the case of arrays, which are otherwise precisely
-      // marked, we need to dirty the entire array, not just its head.
-      if (is_obj_array) {
-        // The [par_]mark_range() method expects mr.end() below to
-        // be aligned to the granularity of a bit's representation
-        // in the heap. In the case of the MUT below, that's a
-        // card size.
-        MemRegion mr(start,
-                     align_up(start + obj_size,
-                              CardTable::card_size /* bytes */));
-        if (par) {
-          _modUnionTable.par_mark_range(mr);
-        } else {
-          _modUnionTable.mark_range(mr);
-        }
-      } else {  // not an obj array; we can just mark the head
-        if (par) {
-          _modUnionTable.par_mark(start);
-        } else {
-          _modUnionTable.mark(start);
-        }
-      }
-    }
-  }
-}
-
-oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
-  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
-  // allocate, copy and if necessary update promoinfo --
-  // delegate to underlying space.
-  assert_lock_strong(freelistLock());
-
-#ifndef PRODUCT
-  if (CMSHeap::heap()->promotion_should_fail()) {
-    return NULL;
-  }
-#endif  // #ifndef PRODUCT
-
-  oop res = _cmsSpace->promote(obj, obj_size);
-  if (res == NULL) {
-    // expand and retry
-    size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
-    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
-    // Since this is the old generation, we don't try to promote
-    // into a more senior generation.
-    res = _cmsSpace->promote(obj, obj_size);
-  }
-  if (res != NULL) {
-    // See comment in allocate() about when objects should
-    // be allocated live.
-    assert(oopDesc::is_oop(obj), "Will dereference klass pointer below");
-    collector()->promoted(false,           // Not parallel
-                          (HeapWord*)res, obj->is_objArray(), obj_size);
-    // promotion counters
-    NOT_PRODUCT(
-      _numObjectsPromoted++;
-      _numWordsPromoted +=
-        (int)(CompactibleFreeListSpace::adjustObjectSize(obj->size()));
-    )
-  }
-  return res;
-}
-
-
-// IMPORTANT: Notes on object size recognition in CMS.
-// ---------------------------------------------------
-// A block of storage in the CMS generation is always in
-// one of three states. A free block (FREE), an allocated
-// object (OBJECT) whose size() method reports the correct size,
-// and an intermediate state (TRANSIENT) in which its size cannot
-// be accurately determined.
-// STATE IDENTIFICATION:   (32 bit and 64 bit w/o COOPS)
-// -----------------------------------------------------
-// FREE:      klass_word & 1 == 1; mark_word holds block size
-//
-// OBJECT:    klass_word installed; klass_word != 0 && klass_word & 1 == 0;
-//            obj->size() computes correct size
-//
-// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
-//
-// STATE IDENTIFICATION: (64 bit+COOPS)
-// ------------------------------------
-// FREE:      mark_word & CMS_FREE_BIT == 1; mark_word & ~CMS_FREE_BIT gives block_size
-//
-// OBJECT:    klass_word installed; klass_word != 0;
-//            obj->size() computes correct size
-//
-// TRANSIENT: klass_word == 0; size is indeterminate until we become an OBJECT
-//
-//
-// STATE TRANSITION DIAGRAM
-//
-//        mut / parnew                     mut  /  parnew
-// FREE --------------------> TRANSIENT ---------------------> OBJECT --|
-//  ^                                                                   |
-//  |------------------------ DEAD <------------------------------------|
-//         sweep                            mut
-//
-// While a block is in TRANSIENT state its size cannot be determined
-// so readers will either need to come back later or stall until
-// the size can be determined. Note that for the case of direct
-// allocation, P-bits, when available, may be used to determine the
-// size of an object that may not yet have been initialized.
-
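For the 32-bit / non-compressed-oops encoding, the three states can be told
apart with a toy classifier like this (the raw klass-word parameter stands in
for the real header accessors):

    #include <cstdint>

    enum class BlockState { FREE, OBJECT, TRANSIENT };

    BlockState classify(uintptr_t klass_word) {
      if (klass_word == 0) return BlockState::TRANSIENT; // size indeterminate
      if (klass_word & 1)  return BlockState::FREE;      // mark word holds size
      return BlockState::OBJECT;                         // obj->size() is correct
    }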
-// Things to support parallel young-gen collection.
-oop
-ConcurrentMarkSweepGeneration::par_promote(int thread_num,
-                                           oop old, markWord m,
-                                           size_t word_sz) {
-#ifndef PRODUCT
-  if (CMSHeap::heap()->promotion_should_fail()) {
-    return NULL;
-  }
-#endif  // #ifndef PRODUCT
-
-  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
-  PromotionInfo* promoInfo = &ps->promo;
-  // if we are tracking promotions, then first ensure space for
-  // promotion (including spooling space for saving header if necessary).
-  // then allocate and copy, then track promoted info if needed.
-  // When tracking (see PromotionInfo::track()), the mark word may
-  // be displaced and in this case restoration of the mark word
-  // occurs in the (oop_since_save_marks_)iterate phase.
-  if (promoInfo->tracking() && !promoInfo->ensure_spooling_space()) {
-    // Out of space for allocating spooling buffers;
-    // try expanding and allocating spooling buffers.
-    if (!expand_and_ensure_spooling_space(promoInfo)) {
-      return NULL;
-    }
-  }
-  assert(!promoInfo->tracking() || promoInfo->has_spooling_space(), "Control point invariant");
-  const size_t alloc_sz = CompactibleFreeListSpace::adjustObjectSize(word_sz);
-  HeapWord* obj_ptr = ps->lab.alloc(alloc_sz);
-  if (obj_ptr == NULL) {
-     obj_ptr = expand_and_par_lab_allocate(ps, alloc_sz);
-     if (obj_ptr == NULL) {
-       return NULL;
-     }
-  }
-  oop obj = oop(obj_ptr);
-  OrderAccess::storestore();
-  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
-  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
-  // IMPORTANT: See note on object initialization for CMS above.
-  // Otherwise, copy the object.  Here we must be careful to insert the
-  // klass pointer last, since this marks the block as an allocated object.
-  // (Except with compressed oops, where the mark word plays this role.)
-  HeapWord* old_ptr = (HeapWord*)old;
-  // Restore the mark word copied above.
-  obj->set_mark_raw(m);
-  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
-  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
-  OrderAccess::storestore();
-
-  if (UseCompressedClassPointers) {
-    // Copy gap missed by (aligned) header size calculation below
-    obj->set_klass_gap(old->klass_gap());
-  }
-  if (word_sz > (size_t)oopDesc::header_size()) {
-    Copy::aligned_disjoint_words(old_ptr + oopDesc::header_size(),
-                                 obj_ptr + oopDesc::header_size(),
-                                 word_sz - oopDesc::header_size());
-  }
-
-  // Now we can track the promoted object, if necessary.  We take care
-  // to delay the transition from uninitialized to full object
-  // (i.e., insertion of klass pointer) until after, so that it
-  // atomically becomes a promoted object.
-  if (promoInfo->tracking()) {
-    promoInfo->track((PromotedObject*)obj, old->klass());
-  }
-  assert(obj->klass_or_null() == NULL, "Object should be uninitialized here.");
-  assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
-  assert(oopDesc::is_oop(old), "Will use and dereference old klass ptr below");
-
-  // Finally, install the klass pointer (this should be volatile).
-  OrderAccess::storestore();
-  obj->set_klass(old->klass());
-  // We should now be able to calculate the right size for this object
-  assert(oopDesc::is_oop(obj) && obj->size() == (int)word_sz, "Error, incorrect size computed for promoted object");
-
-  collector()->promoted(true,          // parallel
-                        obj_ptr, old->is_objArray(), word_sz);
-
-  NOT_PRODUCT(
-    Atomic::inc(&_numObjectsPromoted);
-    Atomic::add(alloc_sz, &_numWordsPromoted);
-  )
-
-  return obj;
-}
-
-void
-ConcurrentMarkSweepGeneration::
-par_promote_alloc_done(int thread_num) {
-  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
-  ps->lab.retire(thread_num);
-}
-
-void
-ConcurrentMarkSweepGeneration::
-par_oop_since_save_marks_iterate_done(int thread_num) {
-  CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
-  ParScanWithoutBarrierClosure* dummy_cl = NULL;
-  ps->promo.promoted_oops_iterate(dummy_cl);
-
-  // Because card-scanning has been completed, subsequent phases
-  // (e.g., reference processing) will not need to recognize which
-  // objects have been promoted during this GC. So, we can now disable
-  // promotion tracking.
-  ps->promo.stopTrackingPromotions();
-}
-
-bool ConcurrentMarkSweepGeneration::should_collect(bool   full,
-                                                   size_t size,
-                                                   bool   tlab)
-{
-  // We allow a STW collection only if a full
-  // collection was requested.
-  return full || should_allocate(size, tlab); // FIX ME !!!
-  // This and promotion failure handling are connected at the
-  // hip and should be fixed by untying them.
-}
-
-bool CMSCollector::shouldConcurrentCollect() {
-  LogTarget(Trace, gc) log;
-
-  if (_full_gc_requested) {
-    log.print("CMSCollector: collect because of explicit  gc request (or GCLocker)");
-    return true;
-  }
-
-  FreelistLocker x(this);
-  // ------------------------------------------------------------------
-  // Print out lots of information which affects the initiation of
-  // a collection.
-  if (log.is_enabled() && stats().valid()) {
-    log.print("CMSCollector shouldConcurrentCollect: ");
-
-    LogStream out(log);
-    stats().print_on(&out);
-
-    log.print("time_until_cms_gen_full %3.7f", stats().time_until_cms_gen_full());
-    log.print("free=" SIZE_FORMAT, _cmsGen->free());
-    log.print("contiguous_available=" SIZE_FORMAT, _cmsGen->contiguous_available());
-    log.print("promotion_rate=%g", stats().promotion_rate());
-    log.print("cms_allocation_rate=%g", stats().cms_allocation_rate());
-    log.print("occupancy=%3.7f", _cmsGen->occupancy());
-    log.print("initiatingOccupancy=%3.7f", _cmsGen->initiating_occupancy());
-    log.print("cms_time_since_begin=%3.7f", stats().cms_time_since_begin());
-    log.print("cms_time_since_end=%3.7f", stats().cms_time_since_end());
-    log.print("metadata initialized %d", MetaspaceGC::should_concurrent_collect());
-  }
-  // ------------------------------------------------------------------
-
-  // If the estimated time to complete a cms collection (cms_duration())
-  // is less than the estimated time remaining until the cms generation
-  // is full, start a collection.
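-  // (Note: time_until_cms_start() folds this comparison in; roughly, it is
-  // the time until the generation is full minus the estimated collection
-  // duration, clamped at zero, so the test below reduces to checking
-  // whether it is 0.0.)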
-  if (!UseCMSInitiatingOccupancyOnly) {
-    if (stats().valid()) {
-      if (stats().time_until_cms_start() == 0.0) {
-        return true;
-      }
-    } else {
-      // We want to conservatively collect somewhat early in order
-      // to try and "bootstrap" our CMS/promotion statistics;
-      // this branch will not fire after the first successful CMS
-      // collection because the stats should then be valid.
-      if (_cmsGen->occupancy() >= _bootstrap_occupancy) {
-        log.print(" CMSCollector: collect for bootstrapping statistics: occupancy = %f, boot occupancy = %f",
-                  _cmsGen->occupancy(), _bootstrap_occupancy);
-        return true;
-      }
-    }
-  }
-
-  // Otherwise, we start a collection cycle if the
-  // old gen wants a collection cycle started, using
-  // an appropriate criterion for making this decision.
-  // XXX We need to make sure that the gen expansion
-  // criterion dovetails well with this. XXX NEED TO FIX THIS
-  if (_cmsGen->should_concurrent_collect()) {
-    log.print("CMS old gen initiated");
-    return true;
-  }
-
-  // We start a collection if we believe an incremental collection may fail;
-  // this is not likely to be productive in practice because it's probably too
-  // late anyway.
-  CMSHeap* heap = CMSHeap::heap();
-  if (heap->incremental_collection_will_fail(true /* consult_young */)) {
-    log.print("CMSCollector: collect because incremental collection will fail ");
-    return true;
-  }
-
-  if (MetaspaceGC::should_concurrent_collect()) {
-    log.print("CMSCollector: collect for metadata allocation ");
-    return true;
-  }
-
-  // CMSTriggerInterval starts a CMS cycle if enough time has passed.
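-  // (Illustrative example: with -XX:CMSTriggerInterval=60000, the check below
-  // fires once cms_time_since_begin() reaches 60000 / 1000.0 = 60.0 seconds.)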
-  if (CMSTriggerInterval >= 0) {
-    if (CMSTriggerInterval == 0) {
-      // Trigger always
-      return true;
-    }
-
-    // Check the CMS time since begin (we do not check the stats validity
-    // as we want to be able to trigger the first CMS cycle as well)
-    if (stats().cms_time_since_begin() >= (CMSTriggerInterval / ((double) MILLIUNITS))) {
-      if (stats().valid()) {
-        log.print("CMSCollector: collect because of trigger interval (time since last begin %3.7f secs)",
-                  stats().cms_time_since_begin());
-      } else {
-        log.print("CMSCollector: collect because of trigger interval (first collection)");
-      }
-      return true;
-    }
-  }
-
-  return false;
-}
-
-void CMSCollector::set_did_compact(bool v) { _cmsGen->set_did_compact(v); }
-
-// Clear _expansion_cause fields of constituent generations
-void CMSCollector::clear_expansion_cause() {
-  _cmsGen->clear_expansion_cause();
-}
-
-// We should be conservative in starting a collection cycle.  To
-// start too eagerly runs the risk of collecting too often in the
-// extreme.  To collect too rarely falls back on full collections,
-// which works, even if not optimum in terms of concurrent work.
-// As a workaround for collecting too eagerly, use the flag
-// UseCMSInitiatingOccupancyOnly.  This also has the advantage of
-// giving the user an easily understandable way of controlling the
-// collections.
-// We want to start a new collection cycle if any of the following
-// conditions hold:
-// . our current occupancy exceeds the configured initiating occupancy
-//   for this generation, or
-// . we recently needed to expand this space and have not, since that
-//   expansion, done a collection of this generation, or
-// . the underlying space believes that it may be a good idea to initiate
-//   a concurrent collection (this may be based on criteria such as the
-//   following: the space uses linear allocation and linear allocation is
-//   going to fail, or there is believed to be excessive fragmentation in
-//   the generation, etc.), or ...
-// [.(currently done by CMSCollector::shouldConcurrentCollect() only for
-//   the case of the old generation; see CR 6543076):
-//   we may be approaching a point at which allocation requests may fail because
-//   we will be out of sufficient free space given allocation rate estimates.]
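-// (Illustrative example: with -XX:CMSInitiatingOccupancyFraction=75 and
-// -XX:+UseCMSInitiatingOccupancyOnly, only the first condition applies, so a
-// cycle starts once occupancy() exceeds 0.75.)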
-bool ConcurrentMarkSweepGeneration::should_concurrent_collect() const {
-
-  assert_lock_strong(freelistLock());
-  if (occupancy() > initiating_occupancy()) {
-    log_trace(gc)(" %s: collect because of occupancy %f / %f  ",
-                  short_name(), occupancy(), initiating_occupancy());
-    return true;
-  }
-  if (UseCMSInitiatingOccupancyOnly) {
-    return false;
-  }
-  if (expansion_cause() == CMSExpansionCause::_satisfy_allocation) {
-    log_trace(gc)(" %s: collect because expanded for allocation ", short_name());
-    return true;
-  }
-  return false;
-}
-
-void ConcurrentMarkSweepGeneration::collect(bool   full,
-                                            bool   clear_all_soft_refs,
-                                            size_t size,
-                                            bool   tlab)
-{
-  collector()->collect(full, clear_all_soft_refs, size, tlab);
-}
-
-void CMSCollector::collect(bool   full,
-                           bool   clear_all_soft_refs,
-                           size_t size,
-                           bool   tlab)
-{
-  // The following "if" branch is present for defensive reasons.
-  // In the current uses of this interface, it can be replaced with:
-  // assert(!GCLocker::is_active(), "Can't be called otherwise");
-  // But I am not placing that assert here to allow future
-  // generality in invoking this interface.
-  if (GCLocker::is_active()) {
-    // A consistency test for GCLocker
-    assert(GCLocker::needs_gc(), "Should have been set already");
-    // Skip this foreground collection, instead
-    // expanding the heap if necessary.
-    // Need the free list locks for the call to free() in compute_new_size()
-    compute_new_size();
-    return;
-  }
-  acquire_control_and_collect(full, clear_all_soft_refs);
-}
-
-void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
-  CMSHeap* heap = CMSHeap::heap();
-  unsigned int gc_count = heap->total_full_collections();
-  if (gc_count == full_gc_count) {
-    MutexLocker y(CGC_lock, Mutex::_no_safepoint_check_flag);
-    _full_gc_requested = true;
-    _full_gc_cause = cause;
-    CGC_lock->notify();   // nudge CMS thread
-  } else {
-    assert(gc_count > full_gc_count, "Error: causal loop");
-  }
-}
-
-bool CMSCollector::is_external_interruption() {
-  GCCause::Cause cause = CMSHeap::heap()->gc_cause();
-  return GCCause::is_user_requested_gc(cause) ||
-         GCCause::is_serviceability_requested_gc(cause);
-}
-
-void CMSCollector::report_concurrent_mode_interruption() {
-  if (is_external_interruption()) {
-    log_debug(gc)("Concurrent mode interrupted");
-  } else {
-    log_debug(gc)("Concurrent mode failure");
-    _gc_tracer_cm->report_concurrent_mode_failure();
-  }
-}
-
-
-// The foreground and background collectors need to coordinate in order
-// to make sure that they do not mutually interfere with CMS collections.
-// When a background collection is active,
-// the foreground collector may need to take over (preempt) and
-// synchronously complete an ongoing collection. Depending on the
-// frequency of the background collections and the heap usage
-// of the application, this preemption can be seldom or frequent.
-// There are only certain
-// points in the background collection at which the "collection-baton"
-// can be passed to the foreground collector.
-//
-// The foreground collector will wait for the baton before
-// starting any part of the collection.  The foreground collector
-// will only wait at one location.
-//
-// The background collector will yield the baton before starting a new
-// phase of the collection (e.g., before initial marking, marking from roots,
-// precleaning, final re-mark, sweep, etc.)  This is normally done at the head
-// of the loop which switches the phases. The background collector does some
-// of the phases (initial mark, final re-mark) with the world stopped.
-// Because of locking involved in stopping the world,
-// the foreground collector should not block waiting for the background
-// collector when it is doing a stop-the-world phase.  The background
-// collector will yield the baton at an additional point just before
-// it enters a stop-the-world phase.  Once the world is stopped, the
-// background collector checks the phase of the collection.  If the
-// phase has not changed, it proceeds with the collection.  If the
-// phase has changed, it skips that phase of the collection.  See
-// the comments on the use of the Heap_lock in collect_in_background().
-//
-// Variable used in baton passing.
-//   _foregroundGCIsActive - Set to true by the foreground collector when
-//      it wants the baton.  The foreground clears it when it has finished
-//      the collection.
-//   _foregroundGCShouldWait - Set to true by the background collector
-//      when it is running.  The foreground collector waits while
-//      _foregroundGCShouldWait is true.
-//  CGC_lock - monitor used to protect access to the above variables
-//      and to notify the foreground and background collectors.
-//  _collectorState - current state of the CMS collection.
-//
-// The foreground collector
-//   acquires the CGC_lock
-//   sets _foregroundGCIsActive
-//   waits on the CGC_lock for _foregroundGCShouldWait to be false
-//     various locks acquired in preparation for the collection
-//     are released so as not to block the background collector
-//     that is in the midst of a collection
-//   proceeds with the collection
-//   clears _foregroundGCIsActive
-//   returns
-//
-// The background collector in a loop iterating on the phases of the
-//      collection
-//   acquires the CGC_lock
-//   sets _foregroundGCShouldWait
-//   if _foregroundGCIsActive is set
-//     clears _foregroundGCShouldWait, notifies CGC_lock
-//     waits on CGC_lock for _foregroundGCIsActive to become false
-//     and exits the loop.
-//   otherwise
-//     proceed with that phase of the collection
-//     if the phase is a stop-the-world phase,
-//       yield the baton once more just before enqueueing
-//       the stop-world CMS operation (executed by the VM thread).
-//   returns after all phases of the collection are done
-//
-
-void CMSCollector::acquire_control_and_collect(bool full,
-        bool clear_all_soft_refs) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(!Thread::current()->is_ConcurrentGC_thread(),
-         "shouldn't try to acquire control from self!");
-
-  // Start the protocol for acquiring control of the
-  // collection from the background collector (aka CMS thread).
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-  // Remember the possibly interrupted state of an ongoing
-  // concurrent collection
-  CollectorState first_state = _collectorState;
-
-  // Signal to a possibly ongoing concurrent collection that
-  // we want to do a foreground collection.
-  _foregroundGCIsActive = true;
-
-  // Release locks and wait for a notify from the background collector;
-  // releasing the locks is only necessary for phases which
-  // yield, to improve the granularity of the collection.
-  assert_lock_strong(bitMapLock());
-  // We need to lock the Free list lock for the space that we are
-  // currently collecting.
-  assert(haveFreelistLocks(), "Must be holding free list locks");
-  bitMapLock()->unlock();
-  releaseFreelistLocks();
-  {
-    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    if (_foregroundGCShouldWait) {
-      // We are going to be waiting for action from the CMS thread;
-      // it had better not be gone (for instance at shutdown)!
-      assert(ConcurrentMarkSweepThread::cmst() != NULL && !ConcurrentMarkSweepThread::cmst()->has_terminated(),
-             "CMS thread must be running");
-      // Wait here until the background collector gives us the go-ahead
-      ConcurrentMarkSweepThread::clear_CMS_flag(
-        ConcurrentMarkSweepThread::CMS_vm_has_token);  // release token
-      // Get a possibly blocked CMS thread going:
-      //   Note that we set _foregroundGCIsActive true above,
-      //   without protection of the CGC_lock.
-      CGC_lock->notify();
-      assert(!ConcurrentMarkSweepThread::vm_thread_wants_cms_token(),
-             "Possible deadlock");
-      while (_foregroundGCShouldWait) {
-        // wait for notification
-        CGC_lock->wait_without_safepoint_check();
-        // Possibility of delay/starvation here, since CMS token does
-        // not know to give priority to VM thread? Actually, I think
-        // there wouldn't be any delay/starvation, but the proof of
-        // that "fact" (?) appears non-trivial. XXX 20011219YSR
-      }
-      ConcurrentMarkSweepThread::set_CMS_flag(
-        ConcurrentMarkSweepThread::CMS_vm_has_token);
-    }
-  }
-  // The CMS_token is already held.  Get back the other locks.
-  assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
-         "VM thread should have CMS token");
-  getFreelistLocks();
-  bitMapLock()->lock_without_safepoint_check();
-  log_debug(gc, state)("CMS foreground collector has asked for control " INTPTR_FORMAT " with first state %d",
-                       p2i(Thread::current()), first_state);
-  log_debug(gc, state)("    gets control with state %d", _collectorState);
-
-  // Inform cms gen if this was due to partial collection failing.
-  // The CMS gen may use this fact to determine its expansion policy.
-  CMSHeap* heap = CMSHeap::heap();
-  if (heap->incremental_collection_will_fail(false /* don't consult_young */)) {
-    assert(!_cmsGen->incremental_collection_failed(),
-           "Should have been noticed, reacted to and cleared");
-    _cmsGen->set_incremental_collection_failed();
-  }
-
-  if (first_state > Idling) {
-    report_concurrent_mode_interruption();
-  }
-
-  set_did_compact(true);
-
-  // If the collection is being acquired from the background
-  // collector, there may be references on the discovered
-  // references lists.  Abandon those references, since some
-  // of them may have become unreachable after concurrent
-  // discovery; the STW compacting collector will redo discovery
-  // more precisely, without being subject to floating garbage.
-  // Leaving otherwise unreachable references in the discovered
-  // lists would require special handling.
-  ref_processor()->disable_discovery();
-  ref_processor()->abandon_partial_discovery();
-  ref_processor()->verify_no_references_recorded();
-
-  if (first_state > Idling) {
-    save_heap_summary();
-  }
-
-  do_compaction_work(clear_all_soft_refs);
-
-  // Has the GC time limit been exceeded?
-  size_t max_eden_size = _young_gen->max_eden_size();
-  GCCause::Cause gc_cause = heap->gc_cause();
-  size_policy()->check_gc_overhead_limit(_young_gen->eden()->used(),
-                                         _cmsGen->max_capacity(),
-                                         max_eden_size,
-                                         full,
-                                         gc_cause,
-                                         heap->soft_ref_policy());
-
-  // Reset the expansion cause, now that we just completed
-  // a collection cycle.
-  clear_expansion_cause();
-  _foregroundGCIsActive = false;
-  return;
-}
-
-// Resize the tenured generation
-// after obtaining the free list locks for the
-// two generations.
-void CMSCollector::compute_new_size() {
-  assert_locked_or_safepoint(Heap_lock);
-  FreelistLocker z(this);
-  MetaspaceGC::compute_new_size();
-  _cmsGen->compute_new_size_free_list();
-  // recalculate CMS used space after CMS collection
-  _cmsGen->cmsSpace()->recalculate_used_stable();
-}
-
-// A work method used by the foreground collector to do
-// a mark-sweep-compact.
-void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
-  CMSHeap* heap = CMSHeap::heap();
-
-  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
-  gc_timer->register_gc_start();
-
-  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
-  gc_tracer->report_gc_start(heap->gc_cause(), gc_timer->gc_start());
-
-  heap->pre_full_gc_dump(gc_timer);
-
-  GCTraceTime(Trace, gc, phases) t("CMS:MSC");
-
-  // Temporarily widen the span of the weak reference processing to
-  // the entire heap.
-  MemRegion new_span(CMSHeap::heap()->reserved_region());
-  ReferenceProcessorSpanMutator rp_mut_span(ref_processor(), new_span);
-  // Temporarily, clear the "is_alive_non_header" field of the
-  // reference processor.
-  ReferenceProcessorIsAliveMutator rp_mut_closure(ref_processor(), NULL);
-  // Temporarily make reference _processing_ single threaded (non-MT).
-  ReferenceProcessorMTProcMutator rp_mut_mt_processing(ref_processor(), false);
-  // Temporarily make refs discovery atomic
-  ReferenceProcessorAtomicMutator rp_mut_atomic(ref_processor(), true);
-  // Temporarily make reference _discovery_ single threaded (non-MT)
-  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
-
-  ref_processor()->set_enqueuing_is_done(false);
-  ref_processor()->enable_discovery();
-  ref_processor()->setup_policy(clear_all_soft_refs);
-  // If an asynchronous collection finishes, the _modUnionTable is
-  // all clear.  If we are taking over the collection from an asynchronous
-  // collection, clear the _modUnionTable.
-  assert(_collectorState != Idling || _modUnionTable.isAllClear(),
-    "_modUnionTable should be clear if the baton was not passed");
-  _modUnionTable.clear_all();
-  assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
-    "mod union for klasses should be clear if the baton was passed");
-  _ct->cld_rem_set()->clear_mod_union();
-
-
-  // We must adjust the allocation statistics being maintained
-  // in the free list space. We do so by reading and clearing
-  // the sweep timer and updating the block flux rate estimates below.
-  assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
-  if (_inter_sweep_timer.is_active()) {
-    _inter_sweep_timer.stop();
-    // Note that we do not use this sample to update the _inter_sweep_estimate.
-    _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
-                                            _inter_sweep_estimate.padded_average(),
-                                            _intra_sweep_estimate.padded_average());
-  }
-
-  GenMarkSweep::invoke_at_safepoint(ref_processor(), clear_all_soft_refs);
-  #ifdef ASSERT
-    CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace();
-    size_t free_size = cms_space->free();
-    assert(free_size ==
-           pointer_delta(cms_space->end(), cms_space->compaction_top())
-           * HeapWordSize,
-      "All the free space should be compacted into one chunk at top");
-    assert(cms_space->dictionary()->total_chunk_size(
-                                      debug_only(cms_space->freelistLock())) == 0 ||
-           cms_space->totalSizeInIndexedFreeLists() == 0,
-      "All the free space should be in a single chunk");
-    size_t num = cms_space->totalCount();
-    assert((free_size == 0 && num == 0) ||
-           (free_size > 0  && (num == 1 || num == 2)),
-         "There should be at most 2 free chunks after compaction");
-  #endif // ASSERT
-  _collectorState = Resetting;
-  assert(_restart_addr == NULL,
-         "Should have been NULL'd before baton was passed");
-  reset_stw();
-  _cmsGen->reset_after_compaction();
-  _concurrent_cycles_since_last_unload = 0;
-
-  // Clear any data recorded in the PLAB chunk arrays.
-  if (_survivor_plab_array != NULL) {
-    reset_survivor_plab_arrays();
-  }
-
-  // Adjust the per-size allocation stats for the next epoch.
-  _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
-  // Restart the "inter sweep timer" for the next epoch.
-  _inter_sweep_timer.reset();
-  _inter_sweep_timer.start();
-
-  // No longer a need to do a concurrent collection for Metaspace.
-  MetaspaceGC::set_should_concurrent_collect(false);
-
-  heap->post_full_gc_dump(gc_timer);
-
-  gc_timer->register_gc_end();
-
-  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
-
-  // For a mark-sweep-compact, compute_new_size() will be called
-  // in the heap's do_collection() method.
-}
-
-void CMSCollector::print_eden_and_survivor_chunk_arrays() {
-  Log(gc, heap) log;
-  if (!log.is_trace()) {
-    return;
-  }
-
-  ContiguousSpace* eden_space = _young_gen->eden();
-  ContiguousSpace* from_space = _young_gen->from();
-  ContiguousSpace* to_space   = _young_gen->to();
-  // Eden
-  if (_eden_chunk_array != NULL) {
-    log.trace("eden " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
-              p2i(eden_space->bottom()), p2i(eden_space->top()),
-              p2i(eden_space->end()), eden_space->capacity());
-    log.trace("_eden_chunk_index=" SIZE_FORMAT ", _eden_chunk_capacity=" SIZE_FORMAT,
-              _eden_chunk_index, _eden_chunk_capacity);
-    for (size_t i = 0; i < _eden_chunk_index; i++) {
-      log.trace("_eden_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_eden_chunk_array[i]));
-    }
-  }
-  // Survivor
-  if (_survivor_chunk_array != NULL) {
-    log.trace("survivor " PTR_FORMAT "-" PTR_FORMAT "-" PTR_FORMAT "(" SIZE_FORMAT ")",
-              p2i(from_space->bottom()), p2i(from_space->top()),
-              p2i(from_space->end()), from_space->capacity());
-    log.trace("_survivor_chunk_index=" SIZE_FORMAT ", _survivor_chunk_capacity=" SIZE_FORMAT,
-              _survivor_chunk_index, _survivor_chunk_capacity);
-    for (size_t i = 0; i < _survivor_chunk_index; i++) {
-      log.trace("_survivor_chunk_array[" SIZE_FORMAT "]=" PTR_FORMAT, i, p2i(_survivor_chunk_array[i]));
-    }
-  }
-}
-
-void CMSCollector::getFreelistLocks() const {
-  // Get locks for all free lists in all generations that this
-  // collector is responsible for
-  _cmsGen->freelistLock()->lock_without_safepoint_check();
-}
-
-void CMSCollector::releaseFreelistLocks() const {
-  // Release locks for all free lists in all generations that this
-  // collector is responsible for
-  _cmsGen->freelistLock()->unlock();
-}
-
-bool CMSCollector::haveFreelistLocks() const {
-  // Check locks for all free lists in all generations that this
-  // collector is responsible for
-  assert_lock_strong(_cmsGen->freelistLock());
-  PRODUCT_ONLY(ShouldNotReachHere());
-  return true;
-}
-
-// A utility class that is used by the CMS collector to
-// temporarily "release" the foreground collector from its
-// usual obligation to wait for the background collector to
-// complete an ongoing phase before proceeding.
-class ReleaseForegroundGC: public StackObj {
- private:
-  CMSCollector* _c;
- public:
-  ReleaseForegroundGC(CMSCollector* c) : _c(c) {
-    assert(_c->_foregroundGCShouldWait, "Else should not need to call");
-    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    // allow a potentially blocked foreground collector to proceed
-    _c->_foregroundGCShouldWait = false;
-    if (_c->_foregroundGCIsActive) {
-      CGC_lock->notify();
-    }
-    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-           "Possible deadlock");
-  }
-
-  ~ReleaseForegroundGC() {
-    assert(!_c->_foregroundGCShouldWait, "Usage protocol violation?");
-    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    _c->_foregroundGCShouldWait = true;
-  }
-};
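-//
-// A typical use, as in collect_in_background() below, brackets a
-// stop-the-world VM operation:
-//
-//   {
-//     ReleaseForegroundGC x(this);  // allow a blocked foreground GC to run
-//     VMThread::execute(&op);       // e.g. initial mark or final remark
-//   }                               // destructor re-asserts the wait flag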
-
-void CMSCollector::collect_in_background(GCCause::Cause cause) {
-  assert(Thread::current()->is_ConcurrentGC_thread(),
-    "A CMS asynchronous collection is only allowed on a CMS thread.");
-
-  CMSHeap* heap = CMSHeap::heap();
-  {
-    MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-    FreelistLocker fll(this);
-    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    if (_foregroundGCIsActive) {
-      // The foreground collector is active. Skip this
-      // background collection.
-      assert(!_foregroundGCShouldWait, "Should be clear");
-      return;
-    } else {
-      assert(_collectorState == Idling, "Should be idling before start.");
-      _collectorState = InitialMarking;
-      register_gc_start(cause);
-      // Reset the expansion cause, now that we are about to begin
-      // a new cycle.
-      clear_expansion_cause();
-
-      // Clear the MetaspaceGC flag since a concurrent collection
-      // is starting but also clear it after the collection.
-      MetaspaceGC::set_should_concurrent_collect(false);
-    }
-    // Decide if we want to enable class unloading as part of the
-    // ensuing concurrent GC cycle.
-    update_should_unload_classes();
-    _full_gc_requested = false;           // acks all outstanding full gc requests
-    _full_gc_cause = GCCause::_no_gc;
-    // Signal that we are about to start a collection
-    heap->increment_total_full_collections();  // ... starting a collection cycle
-    _collection_count_start = heap->total_full_collections();
-  }
-
-  size_t prev_used = _cmsGen->used();
-
-  // The change of the collection state is normally done at this level;
-  // the exceptions are phases that are executed while the world is
-  // stopped.  For those phases the change of state is done while the
-  // world is stopped.  For baton passing purposes this allows the
-  // background collector to finish the phase and change state atomically.
-  // The foreground collector cannot wait on a phase that is done
-  // while the world is stopped because the foreground collector already
-  // has the world stopped and would deadlock.
-  while (_collectorState != Idling) {
-    log_debug(gc, state)("Thread " INTPTR_FORMAT " in CMS state %d",
-                         p2i(Thread::current()), _collectorState);
-    // The foreground collector
-    //   holds the Heap_lock throughout its collection.
-    //   holds the CMS token (but not the lock)
-    //     except while it is waiting for the background collector to yield.
-    //
-    // The foreground collector should be blocked (not for long)
-    //   if the background collector is about to start a phase
-    //   executed with world stopped.  If the background
-    //   collector has already started such a phase, the
-    //   foreground collector is blocked waiting for the
-    //   Heap_lock.  The stop-world phases (InitialMarking and FinalMarking)
-    //   are executed in the VM thread.
-    //
-    // The locking order is
-    //   PendingListLock (PLL)  -- if applicable (FinalMarking)
-    //   Heap_lock  (both this & PLL locked in VM_CMS_Operation::prologue())
-    //   CMS token  (claimed in
-    //                stop_world_and_do() -->
-    //                  safepoint_synchronize() -->
-    //                    CMSThread::synchronize())
-
-    {
-      // Check if the FG collector wants us to yield.
-      CMSTokenSync x(true); // is cms thread
-      if (waitForForegroundGC()) {
-        // We yielded to a foreground GC, nothing more to be
-        // done this round.
-        assert(_foregroundGCShouldWait == false, "We set it to false in "
-               "waitForForegroundGC()");
-        log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
-                             p2i(Thread::current()), _collectorState);
-        return;
-      } else {
-        // The background collector can run but check to see if the
-        // foreground collector has done a collection while the
-        // background collector was waiting to get the CGC_lock
-        // above.  If yes, break so that _foregroundGCShouldWait
-        // is cleared before returning.
-        if (_collectorState == Idling) {
-          break;
-        }
-      }
-    }
-
-    assert(_foregroundGCShouldWait, "Foreground collector, if active, "
-      "should be waiting");
-
-    switch (_collectorState) {
-      case InitialMarking:
-        {
-          ReleaseForegroundGC x(this);
-          stats().record_cms_begin();
-          VM_CMS_Initial_Mark initial_mark_op(this);
-          VMThread::execute(&initial_mark_op);
-        }
-        // The collector state may be any legal state at this point
-        // since the background collector may have yielded to the
-        // foreground collector.
-        break;
-      case Marking:
-        // initial marking in checkpointRootsInitialWork has been completed
-        if (markFromRoots()) { // we were successful
-          assert(_collectorState == Precleaning, "Collector state should "
-            "have changed");
-        } else {
-          assert(_foregroundGCIsActive, "Internal state inconsistency");
-        }
-        break;
-      case Precleaning:
-        // marking from roots in markFromRoots has been completed
-        preclean();
-        assert(_collectorState == AbortablePreclean ||
-               _collectorState == FinalMarking,
-               "Collector state should have changed");
-        break;
-      case AbortablePreclean:
-        abortable_preclean();
-        assert(_collectorState == FinalMarking, "Collector state should "
-          "have changed");
-        break;
-      case FinalMarking:
-        {
-          ReleaseForegroundGC x(this);
-
-          VM_CMS_Final_Remark final_remark_op(this);
-          VMThread::execute(&final_remark_op);
-        }
-        assert(_foregroundGCShouldWait, "block post-condition");
-        break;
-      case Sweeping:
-        // final marking in checkpointRootsFinal has been completed
-        sweep();
-        assert(_collectorState == Resizing, "Collector state change "
-          "to Resizing must be done under the free_list_lock");
-
-      case Resizing: {
-        // Sweeping has been completed...
-        // At this point the background collection has completed.
-        // Don't move the call to compute_new_size() down
-        // into code that might be executed if the background
-        // collection was preempted.
-        {
-          ReleaseForegroundGC x(this);   // unblock FG collection
-          MutexLocker         y(Heap_lock, Mutex::_no_safepoint_check_flag);
-          CMSTokenSync        z(true);   // not strictly needed.
-          if (_collectorState == Resizing) {
-            compute_new_size();
-            save_heap_summary();
-            _collectorState = Resetting;
-          } else {
-            assert(_collectorState == Idling, "The state should only change"
-                   " because the foreground collector has finished the collection");
-          }
-        }
-        break;
-      }
-      case Resetting:
-        // CMS heap resizing has been completed
-        reset_concurrent();
-        assert(_collectorState == Idling, "Collector state should "
-          "have changed");
-
-        MetaspaceGC::set_should_concurrent_collect(false);
-
-        stats().record_cms_end();
-        // Don't move the concurrent_phases_end() and compute_new_size()
-        // calls to here because a preempted background collection
-        // has its state set to "Resetting".
-        break;
-      case Idling:
-      default:
-        ShouldNotReachHere();
-        break;
-    }
-    log_debug(gc, state)("  Thread " INTPTR_FORMAT " done - next CMS state %d",
-                         p2i(Thread::current()), _collectorState);
-    assert(_foregroundGCShouldWait, "block post-condition");
-  }
-
-  // Should this be in gc_epilogue?
-  heap->counters()->update_counters();
-
-  {
-    // Clear _foregroundGCShouldWait and, in the event that the
-    // foreground collector is waiting, notify it, before
-    // returning.
-    MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-    _foregroundGCShouldWait = false;
-    if (_foregroundGCIsActive) {
-      CGC_lock->notify();
-    }
-    assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-           "Possible deadlock");
-  }
-  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " exiting collection CMS state %d",
-                       p2i(Thread::current()), _collectorState);
-  log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
-                     prev_used / K, _cmsGen->used() / K, _cmsGen->capacity() / K);
-}
-
-void CMSCollector::register_gc_start(GCCause::Cause cause) {
-  _cms_start_registered = true;
-  _gc_timer_cm->register_gc_start();
-  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
-}
-
-void CMSCollector::register_gc_end() {
-  if (_cms_start_registered) {
-    report_heap_summary(GCWhen::AfterGC);
-
-    _gc_timer_cm->register_gc_end();
-    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
-    _cms_start_registered = false;
-  }
-}
-
-void CMSCollector::save_heap_summary() {
-  CMSHeap* heap = CMSHeap::heap();
-  _last_heap_summary = heap->create_heap_summary();
-  _last_metaspace_summary = heap->create_metaspace_summary();
-}
-
-void CMSCollector::report_heap_summary(GCWhen::Type when) {
-  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary);
-  _gc_tracer_cm->report_metaspace_summary(when, _last_metaspace_summary);
-}
-
-bool CMSCollector::waitForForegroundGC() {
-  bool res = false;
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should have CMS token");
-  // Block the foreground collector until the
-  // background collector decides whether to
-  // yield.
-  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  _foregroundGCShouldWait = true;
-  if (_foregroundGCIsActive) {
-    // The background collector yields to the
-    // foreground collector and returns a value
-    // indicating that it has yielded.  The foreground
-    // collector can proceed.
-    res = true;
-    _foregroundGCShouldWait = false;
-    ConcurrentMarkSweepThread::clear_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_has_token);
-    ConcurrentMarkSweepThread::set_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_wants_token);
-    // Get a possibly blocked foreground thread going
-    CGC_lock->notify();
-    log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " waiting at CMS state %d",
-                         p2i(Thread::current()), _collectorState);
-    while (_foregroundGCIsActive) {
-      CGC_lock->wait_without_safepoint_check();
-    }
-    ConcurrentMarkSweepThread::set_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_has_token);
-    ConcurrentMarkSweepThread::clear_CMS_flag(
-      ConcurrentMarkSweepThread::CMS_cms_wants_token);
-  }
-  log_debug(gc, state)("CMS Thread " INTPTR_FORMAT " continuing at CMS state %d",
-                       p2i(Thread::current()), _collectorState);
-  return res;
-}
-
-// Because of the need to lock the free lists and other structures in
-// the collector, common to all the generations that the collector is
-// collecting, we need the gc_prologues of individual CMS generations
-// to delegate to their collector. It may have been simpler had the
-// current infrastructure allowed one to call a prologue on a
-// collector. In the absence of that we have the generation's
-// prologue delegate to the collector, which delegates back
-// some "local" work to a worker method in the individual generations
-// that it's responsible for collecting, while itself doing any
-// work common to all generations it's responsible for. A similar
-// comment applies to the gc_epilogue()s.
-// The role of the variable _between_prologue_and_epilogue is to
-// enforce the invocation protocol.
-void CMSCollector::gc_prologue(bool full) {
-  // Call gc_prologue_work() for the CMSGen
-  // we are responsible for.
-
-  // The following locking discipline assumes that we are only called
-  // when the world is stopped.
-  assert(SafepointSynchronize::is_at_safepoint(), "world is stopped assumption");
-
-  // The CMSCollector prologue must call the gc_prologues for the
-  // "generations" that it's responsible
-  // for.
-
-  assert(   Thread::current()->is_VM_thread()
-         || (   CMSScavengeBeforeRemark
-             && Thread::current()->is_ConcurrentGC_thread()),
-         "Incorrect thread type for prologue execution");
-
-  if (_between_prologue_and_epilogue) {
-    // We have already been invoked; this is a gc_prologue delegation
-    // from yet another CMS generation that we are responsible for, just
-    // ignore it since all relevant work has already been done.
-    return;
-  }
-
-  // set a bit saying prologue has been called; cleared in epilogue
-  _between_prologue_and_epilogue = true;
-  // Claim locks for common data structures, then call gc_prologue_work()
-  // for each CMSGen.
-
-  getFreelistLocks();   // gets free list locks on constituent spaces
-  bitMapLock()->lock_without_safepoint_check();
-
-  // Should call gc_prologue_work() for all cms gens we are responsible for
-  bool duringMarking =    _collectorState >= Marking
-                         && _collectorState < Sweeping;
-
-  // The young collections clear the modified oops state, which tells if
-  // there are any modified oops in the class. The remark phase also needs
-  // that information. Tell the young collection to save the union of all
-  // modified klasses.
-  if (duringMarking) {
-    _ct->cld_rem_set()->set_accumulate_modified_oops(true);
-  }
-
-  bool registerClosure = duringMarking;
-
-  _cmsGen->gc_prologue_work(full, registerClosure, &_modUnionClosurePar);
-
-  if (!full) {
-    stats().record_gc0_begin();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
-
-  _capacity_at_prologue = capacity();
-  _used_at_prologue = used();
-  _cmsSpace->recalculate_used_stable();
-
-  // We enable promotion tracking so that card-scanning can recognize
-  // which objects have been promoted during this GC and skip them.
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _par_gc_thread_states[i]->promo.startTrackingPromotions();
-  }
-
-  // Delegate to the CMSCollector, which knows how to coordinate between
-  // this and any other CMS generations that it is responsible for
-  // collecting.
-  collector()->gc_prologue(full);
-}
-
-// This is a "private" interface for use by this generation's CMSCollector.
-// Not to be called directly by any other entity (for instance,
-// GenCollectedHeap, which calls the "public" gc_prologue method above).
-void ConcurrentMarkSweepGeneration::gc_prologue_work(bool full,
-  bool registerClosure, ModUnionClosure* modUnionClosure) {
-  assert(!incremental_collection_failed(), "Shouldn't be set yet");
-  assert(cmsSpace()->preconsumptionDirtyCardClosure() == NULL,
-    "Should be NULL");
-  if (registerClosure) {
-    cmsSpace()->setPreconsumptionDirtyCardClosure(modUnionClosure);
-  }
-  cmsSpace()->gc_prologue();
-  // Clear stat counters
-  NOT_PRODUCT(
-    assert(_numObjectsPromoted == 0, "check");
-    assert(_numWordsPromoted   == 0, "check");
-    log_develop_trace(gc, alloc)("Allocated " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes concurrently",
-                                 _numObjectsAllocated, _numWordsAllocated*sizeof(HeapWord));
-    _numObjectsAllocated = 0;
-    _numWordsAllocated   = 0;
-  )
-}
-
-void CMSCollector::gc_epilogue(bool full) {
-  // The following locking discipline assumes that we are only called
-  // when the world is stopped.
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world is stopped assumption");
-
-  // Currently the CMS epilogue (see CompactibleFreeListSpace) merely checks
-  // if linear allocation blocks need to be appropriately marked to allow
-  // the blocks to be parsable. We also check here whether we need to nudge the
-  // CMS collector thread to start a new cycle (if it's not already active).
-  assert(   Thread::current()->is_VM_thread()
-         || (   CMSScavengeBeforeRemark
-             && Thread::current()->is_ConcurrentGC_thread()),
-         "Incorrect thread type for epilogue execution");
-
-  if (!_between_prologue_and_epilogue) {
-    // We have already been invoked; this is a gc_epilogue delegation
-    // from yet another CMS generation that we are responsible for, just
-    // ignore it since all relevant work has already been done.
-    return;
-  }
-  assert(haveFreelistLocks(), "must have freelist locks");
-  assert_lock_strong(bitMapLock());
-
-  _ct->cld_rem_set()->set_accumulate_modified_oops(false);
-
-  _cmsGen->gc_epilogue_work(full);
-
-  if (_collectorState == AbortablePreclean || _collectorState == Precleaning) {
-    // in case sampling was not already enabled, enable it
-    _start_sampling = true;
-  }
-  // reset _eden_chunk_array so sampling starts afresh
-  _eden_chunk_index = 0;
-
-  size_t cms_used   = _cmsGen->cmsSpace()->used();
-  _cmsGen->cmsSpace()->recalculate_used_stable();
-
-  // update performance counters - this uses a special version of
-  // update_counters() that allows the utilization to be passed as a
-  // parameter, avoiding multiple calls to used().
-  //
-  _cmsGen->update_counters(cms_used);
-
-  bitMapLock()->unlock();
-  releaseFreelistLocks();
-
-  if (!CleanChunkPoolAsync) {
-    Chunk::clean_chunk_pool();
-  }
-
-  set_did_compact(false);
-  _between_prologue_and_epilogue = false;  // ready for next cycle
-}
-
-void ConcurrentMarkSweepGeneration::gc_epilogue(bool full) {
-  collector()->gc_epilogue(full);
-
-  // When using ParNew, promotion tracking should have already been
-  // disabled. However, the prologue (which enables promotion
-  // tracking) and epilogue are called irrespective of the type of
-  // GC. So they will also be called before and after Full GCs, during
-  // which promotion tracking will not be explicitly disabled. So,
-  // it's safer to also disable it here (to be symmetric with
-  // enabling it in the prologue).
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _par_gc_thread_states[i]->promo.stopTrackingPromotions();
-  }
-}
-
-void ConcurrentMarkSweepGeneration::gc_epilogue_work(bool full) {
-  assert(!incremental_collection_failed(), "Should have been cleared");
-  cmsSpace()->setPreconsumptionDirtyCardClosure(NULL);
-  cmsSpace()->gc_epilogue();
-  // Print stat counters
-  NOT_PRODUCT(
-    assert(_numObjectsAllocated == 0, "check");
-    assert(_numWordsAllocated == 0, "check");
-    log_develop_trace(gc, promotion)("Promoted " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
-                                     _numObjectsPromoted, _numWordsPromoted*sizeof(HeapWord));
-    _numObjectsPromoted = 0;
-    _numWordsPromoted   = 0;
-  )
-
-  // The call down the chain in contiguous_available() needs the freelistLock,
-  // so print this out before releasing the freelistLock.
-  log_develop_trace(gc)(" Contiguous available " SIZE_FORMAT " bytes ", contiguous_available());
-}
-
-#ifndef PRODUCT
-bool CMSCollector::have_cms_token() {
-  Thread* thr = Thread::current();
-  if (thr->is_VM_thread()) {
-    return ConcurrentMarkSweepThread::vm_thread_has_cms_token();
-  } else if (thr->is_ConcurrentGC_thread()) {
-    return ConcurrentMarkSweepThread::cms_thread_has_cms_token();
-  } else if (thr->is_GC_task_thread()) {
-    return ConcurrentMarkSweepThread::vm_thread_has_cms_token() &&
-           ParGCRareEvent_lock->owned_by_self();
-  }
-  return false;
-}
-
-// Check reachability of the given heap address in CMS generation,
-// treating all other generations as roots.
-bool CMSCollector::is_cms_reachable(HeapWord* addr) {
-  // We could "guarantee" below, rather than assert, but I'll
-  // leave these as "asserts" so that an adventurous debugger
-  // could try this in the product build provided some subset of
-  // the conditions were met, assuming they were interested in the
-  // results and knew that the computation below wouldn't interfere
-  // with other concurrent computations mutating the structures
-  // being read or written.
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "Else mutations in object graph will make answer suspect");
-  assert(have_cms_token(), "Should hold cms token");
-  assert(haveFreelistLocks(), "must hold free list locks");
-  assert_lock_strong(bitMapLock());
-
-  // Clear the marking bit map array before starting, but, just
-  // for kicks, first report if the given address is already marked
-  tty->print_cr("Start: Address " PTR_FORMAT " is%s marked", p2i(addr),
-                _markBitMap.isMarked(addr) ? "" : " not");
-
-  if (verify_after_remark()) {
-    MutexLocker x(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
-    bool result = verification_mark_bm()->isMarked(addr);
-    tty->print_cr("TransitiveMark: Address " PTR_FORMAT " %s marked", p2i(addr),
-                  result ? "IS" : "is NOT");
-    return result;
-  } else {
-    tty->print_cr("Could not compute result");
-    return false;
-  }
-}
-#endif
-
-void
-CMSCollector::print_on_error(outputStream* st) {
-  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
-  if (collector != NULL) {
-    CMSBitMap* bitmap = &collector->_markBitMap;
-    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, p2i(bitmap));
-    bitmap->print_on_error(st, " Bits: ");
-
-    st->cr();
-
-    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
-    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, p2i(mut_bitmap));
-    mut_bitmap->print_on_error(st, " Bits: ");
-  }
-}
-
-////////////////////////////////////////////////////////
-// CMS Verification Support
-////////////////////////////////////////////////////////
-// Following the remark phase, the following invariant
-// should hold -- each object in the CMS heap which is
-// marked in the verification_mark_bm() should also be marked in markBitMap().
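-// VerifyMarkedClosure below checks exactly this: it is constructed over
-// markBitMap() and iterated over the bits set in verification_mark_bm(),
-// flagging any address that is not marked in both.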
-
-class VerifyMarkedClosure: public BitMapClosure {
-  CMSBitMap* _marks;
-  bool       _failed;
-
- public:
-  VerifyMarkedClosure(CMSBitMap* bm): _marks(bm), _failed(false) {}
-
-  bool do_bit(size_t offset) {
-    HeapWord* addr = _marks->offsetToHeapWord(offset);
-    if (!_marks->isMarked(addr)) {
-      Log(gc, verify) log;
-      ResourceMark rm;
-      LogStream ls(log.error());
-      oop(addr)->print_on(&ls);
-      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
-      _failed = true;
-    }
-    return true;
-  }
-
-  bool failed() { return _failed; }
-};
-
-bool CMSCollector::verify_after_remark() {
-  GCTraceTime(Info, gc, phases, verify) tm("Verifying CMS Marking.");
-  MutexLocker ml(verification_mark_bm()->lock(), Mutex::_no_safepoint_check_flag);
-  static bool init = false;
-
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "Else mutations in object graph will make answer suspect");
-  assert(have_cms_token(),
-         "Else there may be mutual interference in use of "
-         " verification data structures");
-  assert(_collectorState > Marking && _collectorState <= Sweeping,
-         "Else marking info checked here may be obsolete");
-  assert(haveFreelistLocks(), "must hold free list locks");
-  assert_lock_strong(bitMapLock());
-
-
-  // Allocate marking bit map if not already allocated
-  if (!init) { // first time
-    if (!verification_mark_bm()->allocate(_span)) {
-      return false;
-    }
-    init = true;
-  }
-
-  assert(verification_mark_stack()->isEmpty(), "Should be empty");
-
-  // Turn off refs discovery -- so we will be tracing through refs.
-  // This is as intended, because by this time
-  // GC must already have cleared any refs that need to be cleared,
-  // and traced those that need to be marked; moreover,
-  // the marking done here is not going to interfere in any
-  // way with the marking information used by GC.
-  NoRefDiscovery no_discovery(ref_processor());
-
-#if COMPILER2_OR_JVMCI
-  DerivedPointerTableDeactivate dpt_deact;
-#endif
-
-  // Clear any marks from a previous round
-  verification_mark_bm()->clear_all();
-  assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
-  verify_work_stacks_empty();
-
-  CMSHeap* heap = CMSHeap::heap();
-  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
-  // Update the saved marks which may affect the root scans.
-  heap->save_marks();
-
-  if (CMSRemarkVerifyVariant == 1) {
-    // In this first variant of verification, we complete
-    // all marking, then check if the new marks-vector is
-    // a subset of the CMS marks-vector.
-    verify_after_remark_work_1();
-  } else {
-    guarantee(CMSRemarkVerifyVariant == 2, "Range checking for CMSRemarkVerifyVariant should guarantee 1 or 2");
-    // In this second variant of verification, we flag an error
-    // (i.e. an object reachable in the new marks-vector not reachable
-    // in the CMS marks-vector) immediately, also indicating the
-    // identity of an object (A) that references the unmarked object (B) --
-    // presumably, a mutation to A failed to be picked up by preclean/remark?
-    verify_after_remark_work_2();
-  }
-
-  return true;
-}
-
-void CMSCollector::verify_after_remark_work_1() {
-  ResourceMark rm;
-  HandleMark  hm;
-  CMSHeap* heap = CMSHeap::heap();
-
-  // Get a clear set of claim bits for the roots processing to work with.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // Mark from roots one level into CMS
-  MarkRefsIntoClosure notOlder(_span, verification_mark_bm());
-  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-
-  {
-    StrongRootsScope srs(1);
-
-    heap->cms_process_roots(&srs,
-                           true,   // young gen as roots
-                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                           should_unload_classes(),
-                           &notOlder,
-                           NULL);
-  }
-
-  // Now mark from the roots
-  MarkFromRootsClosure markFromRootsClosure(this, _span,
-    verification_mark_bm(), verification_mark_stack(),
-    false /* don't yield */, true /* verifying */);
-  assert(_restart_addr == NULL, "Expected pre-condition");
-  verification_mark_bm()->iterate(&markFromRootsClosure);
-  while (_restart_addr != NULL) {
-    // Deal with stack overflow: by restarting at the indicated
-    // address.
-    HeapWord* ra = _restart_addr;
-    markFromRootsClosure.reset(ra);
-    _restart_addr = NULL;
-    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
-  }
-  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
-  verify_work_stacks_empty();
-
-  // Marking completed -- now verify that each bit marked in
-  // verification_mark_bm() is also marked in markBitMap(); flag all
-  // errors by printing corresponding objects.
-  VerifyMarkedClosure vcl(markBitMap());
-  verification_mark_bm()->iterate(&vcl);
-  if (vcl.failed()) {
-    Log(gc, verify) log;
-    log.error("Failed marking verification after remark");
-    ResourceMark rm;
-    LogStream ls(log.error());
-    heap->print_on(&ls);
-    fatal("CMS: failed marking verification after remark");
-  }
-}
-
-class VerifyCLDOopsCLDClosure : public CLDClosure {
-  class VerifyCLDOopsClosure : public OopClosure {
-    CMSBitMap* _bitmap;
-   public:
-    VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
-    void do_oop(oop* p)       { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
-    void do_oop(narrowOop* p) { ShouldNotReachHere(); }
-  } _oop_closure;
- public:
-  VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
-  void do_cld(ClassLoaderData* cld) {
-    cld->oops_do(&_oop_closure, ClassLoaderData::_claim_none, false);
-  }
-};
-
-void CMSCollector::verify_after_remark_work_2() {
-  ResourceMark rm;
-  HandleMark  hm;
-  CMSHeap* heap = CMSHeap::heap();
-
-  // Get a clear set of claim bits for the roots processing to work with.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  // Mark from roots one level into CMS
-  MarkRefsIntoVerifyClosure notOlder(_span, verification_mark_bm(),
-                                     markBitMap());
-  CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong);
-
-  heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-
-  {
-    StrongRootsScope srs(1);
-
-    heap->cms_process_roots(&srs,
-                           true,   // young gen as roots
-                           GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                           should_unload_classes(),
-                           &notOlder,
-                           &cld_closure);
-  }
-
-  // Now mark from the roots
-  MarkFromRootsVerifyClosure markFromRootsClosure(this, _span,
-    verification_mark_bm(), markBitMap(), verification_mark_stack());
-  assert(_restart_addr == NULL, "Expected pre-condition");
-  verification_mark_bm()->iterate(&markFromRootsClosure);
-  while (_restart_addr != NULL) {
-    // Deal with stack overflow: by restarting at the indicated
-    // address.
-    HeapWord* ra = _restart_addr;
-    markFromRootsClosure.reset(ra);
-    _restart_addr = NULL;
-    verification_mark_bm()->iterate(&markFromRootsClosure, ra, _span.end());
-  }
-  assert(verification_mark_stack()->isEmpty(), "Should have been drained");
-  verify_work_stacks_empty();
-
-  VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
-  ClassLoaderDataGraph::cld_do(&verify_cld_oops);
-
-  // Marking completed -- now verify that each bit marked in
-  // verification_mark_bm() is also marked in markBitMap(); flag all
-  // errors by printing corresponding objects.
-  VerifyMarkedClosure vcl(markBitMap());
-  verification_mark_bm()->iterate(&vcl);
-  assert(!vcl.failed(), "Else verification above should not have succeeded");
-}
-
-void ConcurrentMarkSweepGeneration::save_marks() {
-  // delegate to CMS space
-  cmsSpace()->save_marks();
-}
-
-bool ConcurrentMarkSweepGeneration::no_allocs_since_save_marks() {
-  return cmsSpace()->no_allocs_since_save_marks();
-}
-
-void
-ConcurrentMarkSweepGeneration::oop_iterate(OopIterateClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::oop_iterate(cl);
-  } else {
-    MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::oop_iterate(cl);
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::object_iterate(ObjectClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::object_iterate(cl);
-  } else {
-    MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::object_iterate(cl);
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
-  if (freelistLock()->owned_by_self()) {
-    Generation::safe_object_iterate(cl);
-  } else {
-    MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
-    Generation::safe_object_iterate(cl);
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::post_compact() {
-}
-
-void
-ConcurrentMarkSweepGeneration::prepare_for_verify() {
-  // Fix the linear allocation blocks to look like free blocks.
-
-  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
-  // are not called when the heap is verified during universe initialization and
-  // at vm shutdown.
-  if (freelistLock()->owned_by_self()) {
-    cmsSpace()->prepare_for_verify();
-  } else {
-    MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
-    cmsSpace()->prepare_for_verify();
-  }
-}
-
-void
-ConcurrentMarkSweepGeneration::verify() {
-  // Locks are normally acquired/released in gc_prologue/gc_epilogue, but those
-  // are not called when the heap is verified during universe initialization and
-  // at vm shutdown.
-  if (freelistLock()->owned_by_self()) {
-    cmsSpace()->verify();
-  } else {
-    MutexLocker fll(freelistLock(), Mutex::_no_safepoint_check_flag);
-    cmsSpace()->verify();
-  }
-}
-
-void CMSCollector::verify() {
-  _cmsGen->verify();
-}
-
-#ifndef PRODUCT
-bool CMSCollector::overflow_list_is_empty() const {
-  assert(_num_par_pushes >= 0, "Inconsistency");
-  if (_overflow_list == NULL) {
-    assert(_num_par_pushes == 0, "Inconsistency");
-  }
-  return _overflow_list == NULL;
-}
-
-// The methods verify_work_stacks_empty() and verify_overflow_empty()
-// merely consolidate assertion checks that appear to occur together frequently.
-void CMSCollector::verify_work_stacks_empty() const {
-  assert(_markStack.isEmpty(), "Marking stack should be empty");
-  assert(overflow_list_is_empty(), "Overflow list should be empty");
-}
-
-void CMSCollector::verify_overflow_empty() const {
-  assert(overflow_list_is_empty(), "Overflow list should be empty");
-  assert(no_preserved_marks(), "No preserved marks");
-}
-#endif // PRODUCT
-
-// Decide if we want to enable class unloading as part of the
-// ensuing concurrent GC cycle. We will collect and
-// unload classes if it's the case that:
-//  (a) class unloading is enabled at the command line, and either
-//  (b) enough concurrent cycles have elapsed since classes were last
-//      unloaded (CMSClassUnloadingMaxInterval), or
-//  (c) old gen is getting really full
-// NOTE: Provided there is no change in the state of the heap between
-// calls to this method, it should have idempotent results. Moreover,
-// its results should be monotonically increasing (i.e. going from 0 to 1,
-// but not 1 to 0) between successive calls between which the heap was
-// not collected. The implementation below therefore relies on the
-// property that concurrent_cycles_since_last_unload() will not decrease
-// unless a collection cycle happened, and that _cmsGen->is_too_full() is
-// itself also monotonic in that sense. See check_monotonicity()
-// below.
-void CMSCollector::update_should_unload_classes() {
-  _should_unload_classes = false;
-  if (CMSClassUnloadingEnabled) {
-    _should_unload_classes = (concurrent_cycles_since_last_unload() >=
-                              CMSClassUnloadingMaxInterval)
-                           || _cmsGen->is_too_full();
-  }
-}
-
-bool ConcurrentMarkSweepGeneration::is_too_full() const {
-  bool res = should_concurrent_collect();
-  res = res && (occupancy() > (double)CMSIsTooFullPercentage/100.0);
-  return res;
-}
-
-void CMSCollector::setup_cms_unloading_and_verification_state() {
-  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
-                             || VerifyBeforeExit;
-  const  int  rso           =   GenCollectedHeap::SO_AllCodeCache;
-
-  // We set the proper root for this CMS cycle here.
-  if (should_unload_classes()) {   // Should unload classes this cycle
-    remove_root_scanning_option(rso);  // Shrink the root set appropriately
-    set_verifying(should_verify);    // Set verification state for this cycle
-    return;                            // Nothing else needs to be done at this time
-  }
-
-  // Not unloading classes this cycle
-  assert(!should_unload_classes(), "Inconsistency!");
-
-  // If we are not unloading classes then add SO_AllCodeCache to root
-  // scanning options.
-  add_root_scanning_option(rso);
-
-  if ((!verifying() || unloaded_classes_last_cycle()) && should_verify) {
-    set_verifying(true);
-  } else if (verifying() && !should_verify) {
-    // We were verifying, but some verification flags got disabled.
-    set_verifying(false);
-    // Exclude symbols, strings and code cache elements from root scanning to
-    // reduce IM and RM pauses.
-    remove_root_scanning_option(rso);
-  }
-}
-
-
-#ifndef PRODUCT
-HeapWord* CMSCollector::block_start(const void* p) const {
-  const HeapWord* addr = (HeapWord*)p;
-  if (_span.contains(p)) {
-    if (_cmsGen->cmsSpace()->is_in_reserved(addr)) {
-      return _cmsGen->cmsSpace()->block_start(p);
-    }
-  }
-  return NULL;
-}
-#endif
-
-HeapWord*
-ConcurrentMarkSweepGeneration::expand_and_allocate(size_t word_size,
-                                                   bool   tlab,
-                                                   bool   parallel) {
-  CMSSynchronousYieldRequest yr;
-  assert(!tlab, "Can't deal with TLAB allocation");
-  MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
-  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
-  if (GCExpandToAllocateDelayMillis > 0) {
-    os::naked_sleep(GCExpandToAllocateDelayMillis);
-  }
-  return have_lock_and_allocate(word_size, tlab);
-}
-
-void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
-    size_t bytes,
-    size_t expand_bytes,
-    CMSExpansionCause::Cause cause)
-{
-
-  bool success = expand(bytes, expand_bytes);
-
-  // Remember why we expanded; this information is used
-  // by should_concurrent_collect() when making decisions on whether to
-  // start a new CMS cycle.
-  if (success) {
-    set_expansion_cause(cause);
-    log_trace(gc)("Expanded CMS gen for %s",  CMSExpansionCause::to_string(cause));
-  }
-}
-
-HeapWord* ConcurrentMarkSweepGeneration::expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz) {
-  HeapWord* res = NULL;
-  MutexLocker x(ParGCRareEvent_lock);
-  while (true) {
-    // Expansion by some other thread might make alloc OK now:
-    res = ps->lab.alloc(word_sz);
-    if (res != NULL) return res;
-    // If there's not enough expansion space available, give up.
-    if (_virtual_space.uncommitted_size() < (word_sz * HeapWordSize)) {
-      return NULL;
-    }
-    // Otherwise, we try expansion.
-    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
-    // Now go around the loop and try alloc again;
-    // A competing par_promote might beat us to the expansion space,
-    // so we may go around the loop again if promotion fails again.
-    if (GCExpandToAllocateDelayMillis > 0) {
-      os::naked_sleep(GCExpandToAllocateDelayMillis);
-    }
-  }
-}
-
-
-bool ConcurrentMarkSweepGeneration::expand_and_ensure_spooling_space(
-  PromotionInfo* promo) {
-  MutexLocker x(ParGCRareEvent_lock);
-  size_t refill_size_bytes = promo->refillSize() * HeapWordSize;
-  while (true) {
-    // Expansion by some other thread might make alloc OK now:
-    if (promo->ensure_spooling_space()) {
-      assert(promo->has_spooling_space(),
-             "Post-condition of successful ensure_spooling_space()");
-      return true;
-    }
-    // If there's not enough expansion space available, give up.
-    if (_virtual_space.uncommitted_size() < refill_size_bytes) {
-      return false;
-    }
-    // Otherwise, we try expansion.
-    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
-    // Now go around the loop and try alloc again;
-    // A competing allocation might beat us to the expansion space,
-    // so we may go around the loop again if allocation fails again.
-    if (GCExpandToAllocateDelayMillis > 0) {
-      os::naked_sleep(GCExpandToAllocateDelayMillis);
-    }
-  }
-}
-
-void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
-  // Only shrink if a compaction was done so that all the free space
-  // in the generation is in a contiguous block at the end.
-  if (did_compact()) {
-    CardGeneration::shrink(bytes);
-  }
-}
-
-void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
-  assert_locked_or_safepoint(Heap_lock);
-}
-
-void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  assert_lock_strong(freelistLock());
-  log_trace(gc)("Shrinking of CMS not yet implemented");
-  return;
-}
-
-
-// Simple ctor/dtor wrapper for accounting & timer chores around concurrent
-// phases.
-class CMSPhaseAccounting: public StackObj {
- public:
-  CMSPhaseAccounting(CMSCollector *collector,
-                     const char *title);
-  ~CMSPhaseAccounting();
-
- private:
-  CMSCollector *_collector;
-  const char *_title;
-  GCTraceConcTime(Info, gc) _trace_time;
-
- public:
-  // Not MT-safe; so do not pass around these StackObj's
-  // where they may be accessed by other threads.
-  double wallclock_millis() {
-    return TimeHelper::counter_to_millis(os::elapsed_counter() - _trace_time.start_time());
-  }
-};
-
-CMSPhaseAccounting::CMSPhaseAccounting(CMSCollector *collector,
-                                       const char *title) :
-  _collector(collector), _title(title), _trace_time(title) {
-
-  _collector->resetYields();
-  _collector->resetTimer();
-  _collector->startTimer();
-  _collector->gc_timer_cm()->register_gc_concurrent_start(title);
-}
-
-CMSPhaseAccounting::~CMSPhaseAccounting() {
-  _collector->gc_timer_cm()->register_gc_concurrent_end();
-  _collector->stopTimer();
-  log_debug(gc)("Concurrent active time: %.3fms", TimeHelper::counter_to_millis(_collector->timerTicks()));
-  log_trace(gc)(" (CMS %s yielded %d times)", _title, _collector->yields());
-}
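-
-// A minimal usage sketch (the phase name is hypothetical; real call
-// sites such as markFromRoots() below follow the same pattern). Because
-// CMSPhaseAccounting is a StackObj, the timer/yield accounting and the
-// concurrent-start/end events bracket exactly the enclosing scope:
-//
-//   {
-//     GCTraceCPUTime tcpu;
-//     CMSPhaseAccounting pa(this, "Concurrent Example Phase");
-//     ... do the concurrent phase's work ...
-//   }  // destructor stops the timer and logs active time and yields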
-
-// CMS work
-
-// The common parts of CMSParInitialMarkTask and CMSParRemarkTask.
-class CMSParMarkTask : public AbstractGangTask {
- protected:
-  CMSCollector*     _collector;
-  uint              _n_workers;
-  CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) :
-      AbstractGangTask(name),
-      _collector(collector),
-      _n_workers(n_workers) {}
-  // Work method in support of parallel rescan ... of young gen spaces
-  void do_young_space_rescan(OopsInGenClosure* cl,
-                             ContiguousSpace* space,
-                             HeapWord** chunk_array, size_t chunk_top);
-  void work_on_young_gen_roots(OopsInGenClosure* cl);
-};
-
-// Parallel initial mark task
-class CMSParInitialMarkTask: public CMSParMarkTask {
-  StrongRootsScope* _strong_roots_scope;
- public:
-  CMSParInitialMarkTask(CMSCollector* collector, StrongRootsScope* strong_roots_scope, uint n_workers) :
-      CMSParMarkTask("Scan roots and young gen for initial mark in parallel", collector, n_workers),
-      _strong_roots_scope(strong_roots_scope) {}
-  void work(uint worker_id);
-};
-
-// Checkpoint the roots into this generation from outside
-// this generation. [Note this initial checkpoint need only
-// be approximate -- we'll do a catch up phase subsequently.]
-void CMSCollector::checkpointRootsInitial() {
-  assert(_collectorState == InitialMarking, "Wrong collector state");
-  check_correct_thread_executing();
-  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
-
-  save_heap_summary();
-  report_heap_summary(GCWhen::BeforeGC);
-
-  ReferenceProcessor* rp = ref_processor();
-  assert(_restart_addr == NULL, "Control point invariant");
-  {
-    // acquire locks for subsequent manipulations
-    MutexLocker x(bitMapLock(),
-                  Mutex::_no_safepoint_check_flag);
-    checkpointRootsInitialWork();
-    // enable ("weak") refs discovery
-    rp->enable_discovery();
-    _collectorState = Marking;
-  }
-
-  _cmsGen->cmsSpace()->recalculate_used_stable();
-}
-
-void CMSCollector::checkpointRootsInitialWork() {
-  assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
-  assert(_collectorState == InitialMarking, "just checking");
-
-  // Already have locks.
-  assert_lock_strong(bitMapLock());
-  assert(_markBitMap.isAllClear(), "was reset at end of previous cycle");
-
-  // Set up the verification and class unloading state for this
-  // CMS collection cycle.
-  setup_cms_unloading_and_verification_state();
-
-  GCTraceTime(Trace, gc, phases) ts("checkpointRootsInitialWork", _gc_timer_cm);
-
-  // Reset all the PLAB chunk arrays if necessary.
-  if (_survivor_plab_array != NULL && !CMSPLABRecordAlways) {
-    reset_survivor_plab_arrays();
-  }
-
-  ResourceMark rm;
-  HandleMark  hm;
-
-  MarkRefsIntoClosure notOlder(_span, &_markBitMap);
-  CMSHeap* heap = CMSHeap::heap();
-
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-
-  heap->ensure_parsability(false);  // fill TLABs, but no need to retire them
-  // Update the saved marks which may affect the root scans.
-  heap->save_marks();
-
-  // weak reference processing has not started yet.
-  ref_processor()->set_enqueuing_is_done(false);
-
-  // Need to remember all newly created CLDs,
-  // so that we can guarantee that the remark finds them.
-  ClassLoaderDataGraph::remember_new_clds(true);
-
-  // Whenever a CLD is found, it will be claimed before proceeding to mark
-  // the klasses. The claimed marks need to be cleared before marking starts.
-  ClassLoaderDataGraph::clear_claimed_marks();
-
-  print_eden_and_survivor_chunk_arrays();
-
-  {
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTableDeactivate dpt_deact;
-#endif
-    if (CMSParallelInitialMarkEnabled) {
-      // The parallel version.
-      WorkGang* workers = heap->workers();
-      assert(workers != NULL, "Need parallel worker threads.");
-      uint n_workers = workers->active_workers();
-
-      StrongRootsScope srs(n_workers);
-
-      CMSParInitialMarkTask tsk(this, &srs, n_workers);
-      initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
-      // If the total number of workers is greater than 1, then multiple
-      // workers may be used at some time and the initialization has been
-      // set up such that the single-threaded path cannot be used.
-      if (workers->total_workers() > 1) {
-        workers->run_task(&tsk);
-      } else {
-        tsk.work(0);
-      }
-    } else {
-      // The serial version.
-      CLDToOopClosure cld_closure(&notOlder, ClassLoaderData::_claim_strong);
-      heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-
-      StrongRootsScope srs(1);
-
-      heap->cms_process_roots(&srs,
-                             true,   // young gen as roots
-                             GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                             should_unload_classes(),
-                             &notOlder,
-                             &cld_closure);
-    }
-  }
-
-  // The mod-union table should be clear at this point; it will be dirtied
-  // in the CMS generation's gc_prologue on each young generation collection.
-
-  assert(_modUnionTable.isAllClear(),
-       "Was cleared in most recent final checkpoint phase"
-       " or no bits are set in the gc_prologue before the start of the next "
-       "subsequent marking phase.");
-
-  assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
-
-  // Save the end of the used_region of the constituent generations
-  // to be used to limit the extent of sweep in each generation.
-  save_sweep_limits();
-  verify_overflow_empty();
-}
-
-bool CMSCollector::markFromRoots() {
-  // we might be tempted to assert that:
-  // assert(!SafepointSynchronize::is_at_safepoint(),
-  //        "inconsistent argument?");
-  // However that wouldn't be right, because it's possible that
-  // a safepoint is indeed in progress as a young generation
-  // stop-the-world GC happens even as we mark in this generation.
-  assert(_collectorState == Marking, "inconsistent state?");
-  check_correct_thread_executing();
-  verify_overflow_empty();
-
-  // Weak ref discovery note: We may be discovering weak
-  // refs in this generation concurrent (but interleaved) with
-  // weak ref discovery by the young generation collector.
-
-  CMSTokenSyncWithLocks ts(true, bitMapLock());
-  GCTraceCPUTime tcpu;
-  CMSPhaseAccounting pa(this, "Concurrent Mark");
-  bool res = markFromRootsWork();
-  if (res) {
-    _collectorState = Precleaning;
-  } else { // We failed and a foreground collection wants to take over
-    assert(_foregroundGCIsActive, "internal state inconsistency");
-    assert(_restart_addr == NULL,  "foreground will restart from scratch");
-    log_debug(gc)("bailing out to foreground collection");
-  }
-  verify_overflow_empty();
-  return res;
-}
-
-bool CMSCollector::markFromRootsWork() {
-  // iterate over marked bits in bit map, doing a full scan and mark
-  // from these roots using the following algorithm:
-  // . if oop is to the right of the current scan pointer,
-  //   mark corresponding bit (we'll process it later)
-  // . else (oop is to left of current scan pointer)
-  //   push oop on marking stack
-  // . drain the marking stack
-
-  // Note that when we do a marking step we need to hold the
-  // bit map lock -- recall that direct allocation (by mutators)
-  // and promotion (by the young generation collector) is also
-  // marking the bit map. [the so-called allocate live policy.]
-  // Because the implementation of bit map marking is not
-  // robust wrt simultaneous marking of bits in the same word,
-  // we need to make sure that there is no such interference
-  // between concurrent such updates.
-
-  // already have locks
-  assert_lock_strong(bitMapLock());
-
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-  bool result = false;
-  if (CMSConcurrentMTEnabled && ConcGCThreads > 0) {
-    result = do_marking_mt();
-  } else {
-    result = do_marking_st();
-  }
-  return result;
-}
-
-// Forward decl
-class CMSConcMarkingTask;
-
-class CMSConcMarkingParallelTerminator: public ParallelTaskTerminator {
-  CMSCollector*       _collector;
-  CMSConcMarkingTask* _task;
- public:
-  virtual void yield();
-
-  // "n_threads" is the number of threads to be terminated.
-  // "queue_set" is a set of work queues of other threads.
-  // "collector" is the CMS collector associated with this task terminator.
-  // "yield" indicates whether we need the gang as a whole to yield.
-  CMSConcMarkingParallelTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
-    ParallelTaskTerminator(n_threads, queue_set),
-    _collector(collector) { }
-
-  void set_task(CMSConcMarkingTask* task) {
-    _task = task;
-  }
-};
-
-class CMSConcMarkingOWSTTerminator: public OWSTTaskTerminator {
-  CMSCollector*       _collector;
-  CMSConcMarkingTask* _task;
- public:
-  virtual void yield();
-
-  // "n_threads" is the number of threads to be terminated.
-  // "queue_set" is a set of work queues of other threads.
-  // "collector" is the CMS collector associated with this task terminator.
-  // "yield" indicates whether we need the gang as a whole to yield.
-  CMSConcMarkingOWSTTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) :
-    OWSTTaskTerminator(n_threads, queue_set),
-    _collector(collector) { }
-
-  void set_task(CMSConcMarkingTask* task) {
-    _task = task;
-  }
-};
-
-class CMSConcMarkingTaskTerminator {
- private:
-  ParallelTaskTerminator* _term;
- public:
-  CMSConcMarkingTaskTerminator(int n_threads, TaskQueueSetSuper* queue_set, CMSCollector* collector) {
-    if (UseOWSTTaskTerminator) {
-      _term = new CMSConcMarkingOWSTTerminator(n_threads, queue_set, collector);
-    } else {
-      _term = new CMSConcMarkingParallelTerminator(n_threads, queue_set, collector);
-    }
-  }
-  ~CMSConcMarkingTaskTerminator() {
-    assert(_term != NULL, "Must not be NULL");
-    delete _term;
-  }
-
-  void set_task(CMSConcMarkingTask* task);
-  ParallelTaskTerminator* terminator() const { return _term; }
-};
-
-class CMSConcMarkingTerminatorTerminator: public TerminatorTerminator {
-  CMSConcMarkingTask* _task;
- public:
-  bool should_exit_termination();
-  void set_task(CMSConcMarkingTask* task) {
-    _task = task;
-  }
-};
-
-// MT Concurrent Marking Task
-class CMSConcMarkingTask: public YieldingFlexibleGangTask {
-  CMSCollector*             _collector;
-  uint                      _n_workers;      // requested/desired # workers
-  bool                      _result;
-  CompactibleFreeListSpace* _cms_space;
-  char                      _pad_front[64];   // padding to ...
-  HeapWord* volatile        _global_finger;   // ... avoid sharing cache line
-  char                      _pad_back[64];
-  HeapWord*                 _restart_addr;
-
-  //  Exposed here for yielding support
-  Mutex* const _bit_map_lock;
-
-  // The per thread work queues, available here for stealing
-  OopTaskQueueSet*  _task_queues;
-
-  // Termination (and yielding) support
-  CMSConcMarkingTaskTerminator       _term;
-  CMSConcMarkingTerminatorTerminator _term_term;
-
- public:
-  CMSConcMarkingTask(CMSCollector* collector,
-                 CompactibleFreeListSpace* cms_space,
-                 YieldingFlexibleWorkGang* workers,
-                 OopTaskQueueSet* task_queues):
-    YieldingFlexibleGangTask("Concurrent marking done multi-threaded"),
-    _collector(collector),
-    _n_workers(0),
-    _result(true),
-    _cms_space(cms_space),
-    _bit_map_lock(collector->bitMapLock()),
-    _task_queues(task_queues),
-    _term(_n_workers, task_queues, _collector)
-  {
-    _requested_size = _n_workers;
-    _term.set_task(this);
-    _term_term.set_task(this);
-    _restart_addr = _global_finger = _cms_space->bottom();
-  }
-
-
-  OopTaskQueueSet* task_queues()  { return _task_queues; }
-
-  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
-  HeapWord* volatile* global_finger_addr() { return &_global_finger; }
-
-  ParallelTaskTerminator* terminator() { return _term.terminator(); }
-
-  virtual void set_for_termination(uint active_workers) {
-    terminator()->reset_for_reuse(active_workers);
-  }
-
-  void work(uint worker_id);
-  bool should_yield() {
-    return    ConcurrentMarkSweepThread::should_yield()
-           && !_collector->foregroundGCIsActive();
-  }
-
-  virtual void coordinator_yield();  // stuff done by coordinator
-  bool result() { return _result; }
-
-  void reset(HeapWord* ra) {
-    assert(_global_finger >= _cms_space->end(),  "Postcondition of ::work(i)");
-    _restart_addr = _global_finger = ra;
-    _term.terminator()->reset_for_reuse();
-  }
-
-  static bool get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
-                                           OopTaskQueue* work_q);
-
- private:
-  void do_scan_and_mark(int i, CompactibleFreeListSpace* sp);
-  void do_work_steal(int i);
-  void bump_global_finger(HeapWord* f);
-};
-
-bool CMSConcMarkingTerminatorTerminator::should_exit_termination() {
-  assert(_task != NULL, "Error");
-  return _task->yielding();
-  // Note that we do not need the disjunct || _task->should_yield() above
-  // because we want terminating threads to yield only if the task
-  // is already in the midst of yielding, which happens only after at least one
-  // thread has yielded.
-}
-
-void CMSConcMarkingParallelTerminator::yield() {
-  if (_task->should_yield()) {
-    _task->yield();
-  } else {
-    ParallelTaskTerminator::yield();
-  }
-}
-
-void CMSConcMarkingOWSTTerminator::yield() {
-  if (_task->should_yield()) {
-    _task->yield();
-  } else {
-    OWSTTaskTerminator::yield();
-  }
-}
-
-void CMSConcMarkingTaskTerminator::set_task(CMSConcMarkingTask* task) {
-  if (UseOWSTTaskTerminator) {
-    ((CMSConcMarkingOWSTTerminator*)_term)->set_task(task);
-  } else {
-    ((CMSConcMarkingParallelTerminator*)_term)->set_task(task);
-  }
-}
-
-////////////////////////////////////////////////////////////////
-// Concurrent Marking Algorithm Sketch
-////////////////////////////////////////////////////////////////
-// Until all tasks exhausted (both spaces):
-// -- claim next available chunk
-// -- bump global finger via CAS
-// -- find first object that starts in this chunk
-//    and start scanning bitmap from that position
-// -- scan marked objects for oops
-// -- CAS-mark target, and if successful:
-//    . if target oop is above global finger (volatile read)
-//      nothing to do
-//    . if target oop is in chunk and above local finger
-//        then nothing to do
-//    . else push on work-queue
-// -- Deal with possible overflow issues:
-//    . local work-queue overflow causes stuff to be pushed on
-//      global (common) overflow queue
-//    . always first empty local work queue
-//    . then get a batch of oops from global work queue if any
-//    . then do work stealing
-// -- When all tasks claimed (both spaces)
-//    and local work queue empty,
-//    then in a loop do:
-//    . check global overflow stack; steal a batch of oops and trace
-//    . try to steal from other threads if GOS is empty
-//    . if neither is available, offer termination
-// -- Terminate and return result
-//
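-// A concrete (hypothetical) illustration of the chunk claiming done in
-// do_scan_and_mark() below: with chunk_size == 512 (HeapWords) and
-// aligned_start == A, task n claims the half-open span
-// [A + n*512, A + (n+1)*512); claimed spans therefore tile the space
-// without overlap, and each claim bumps the global finger to its span's
-// end before intersecting the span with used_region().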
-void CMSConcMarkingTask::work(uint worker_id) {
-  elapsedTimer _timer;
-  ResourceMark rm;
-  HandleMark hm;
-
-  DEBUG_ONLY(_collector->verify_overflow_empty();)
-
-  // Before we begin work, our work queue should be empty
-  assert(work_queue(worker_id)->size() == 0, "Expected to be empty");
-  // Scan the bitmap covering _cms_space, tracing through grey objects.
-  _timer.start();
-  do_scan_and_mark(worker_id, _cms_space);
-  _timer.stop();
-  log_trace(gc, task)("Finished cms space scanning in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-
-  // ... do work stealing
-  _timer.reset();
-  _timer.start();
-  do_work_steal(worker_id);
-  _timer.stop();
-  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-  assert(_collector->_markStack.isEmpty(), "Should have been emptied");
-  assert(work_queue(worker_id)->size() == 0, "Should have been emptied");
-  // Note that under the current task protocol, the
-  // following assertion is true even of the spaces
-  // expanded since the completion of the concurrent
-  // marking. XXX This will likely change under a strict
-  // ABORT semantics.
-  // After perm removal the comparison was changed to
-  // greater than or equal to from strictly greater than.
-  // Before perm removal the highest address sweep would
-  // have been at the end of perm gen but now is at the
-  // end of the tenured gen.
-  assert(_global_finger >=  _cms_space->end(),
-         "All tasks have been completed");
-  DEBUG_ONLY(_collector->verify_overflow_empty();)
-}
-
-void CMSConcMarkingTask::bump_global_finger(HeapWord* f) {
-  HeapWord* read = _global_finger;
-  HeapWord* cur  = read;
-  while (f > read) {
-    cur = read;
-    read = Atomic::cmpxchg(f, &_global_finger, cur);
-    if (cur == read) {
-      // our cas succeeded
-      assert(_global_finger >= f, "protocol consistency");
-      break;
-    }
-  }
-}
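-
-// The loop above implements a monotone advance of the global finger: it
-// publishes f only if no other thread has already advanced _global_finger
-// past f, retrying on CAS failure with the freshly observed value. Losing
-// the race to a larger value is benign, since the finger only ever moves
-// forward.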
-
-// This is really inefficient, and should be redone by
-// using (not yet available) block-read and -write interfaces to the
-// stack and the work_queue. XXX FIX ME !!!
-bool CMSConcMarkingTask::get_work_from_overflow_stack(CMSMarkStack* ovflw_stk,
-                                                      OopTaskQueue* work_q) {
-  // Fast lock-free check
-  if (ovflw_stk->length() == 0) {
-    return false;
-  }
-  assert(work_q->size() == 0, "Shouldn't steal");
-  MutexLocker ml(ovflw_stk->par_lock(),
-                 Mutex::_no_safepoint_check_flag);
-  // Grab up to 1/4 the size of the work queue
-  size_t num = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
-                    (size_t)ParGCDesiredObjsFromOverflowList);
-  num = MIN2(num, ovflw_stk->length());
-  for (int i = (int) num; i > 0; i--) {
-    oop cur = ovflw_stk->pop();
-    assert(cur != NULL, "Counted wrong?");
-    work_q->push(cur);
-  }
-  return num > 0;
-}
-
-void CMSConcMarkingTask::do_scan_and_mark(int i, CompactibleFreeListSpace* sp) {
-  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
-  int n_tasks = pst->n_tasks();
-  // We allow that there may be no tasks to do here because
-  // we are restarting after a stack overflow.
-  assert(pst->valid() || n_tasks == 0, "Uninitialized use?");
-  uint nth_task = 0;
-
-  HeapWord* aligned_start = sp->bottom();
-  if (sp->used_region().contains(_restart_addr)) {
-    // Align down to a card boundary for the start of 0th task
-    // for this space.
-    aligned_start = align_down(_restart_addr, CardTable::card_size);
-  }
-
-  size_t chunk_size = sp->marking_task_size();
-  while (pst->try_claim_task(/* reference */ nth_task)) {
-    // Having claimed the nth task in this space,
-    // compute the chunk that it corresponds to:
-    MemRegion span = MemRegion(aligned_start + nth_task*chunk_size,
-                               aligned_start + (nth_task+1)*chunk_size);
-    // Try and bump the global finger via a CAS;
-    // note that we need to do the global finger bump
-    // _before_ taking the intersection below, because
-    // the task corresponding to that region will be
-    // deemed done even if the used_region() expands
-    // because of allocation -- as it almost certainly will
-    // during start-up while the threads yield in the
-    // closure below.
-    HeapWord* finger = span.end();
-    bump_global_finger(finger);   // atomically
-    // There are null tasks here corresponding to chunks
-    // beyond the "top" address of the space.
-    span = span.intersection(sp->used_region());
-    if (!span.is_empty()) {  // Non-null task
-      HeapWord* prev_obj;
-      assert(!span.contains(_restart_addr) || nth_task == 0,
-             "Inconsistency");
-      if (nth_task == 0) {
-        // For the 0th task, we'll not need to compute a block_start.
-        if (span.contains(_restart_addr)) {
-          // In the case of a restart because of stack overflow,
-          // we might additionally skip a chunk prefix.
-          prev_obj = _restart_addr;
-        } else {
-          prev_obj = span.start();
-        }
-      } else {
-        // We want to skip the first object because
-        // the protocol is to scan any object in its entirety
-        // that _starts_ in this span; a fortiori, any
-        // object starting in an earlier span is scanned
-        // as part of an earlier claimed task.
-        // Below we use the "careful" version of block_start
-        // so we do not try to navigate uninitialized objects.
-        prev_obj = sp->block_start_careful(span.start());
-        // Below we use a variant of block_size that uses the
-        // Printezis bits to avoid waiting for allocated
-        // objects to become initialized/parsable.
-        while (prev_obj < span.start()) {
-          size_t sz = sp->block_size_no_stall(prev_obj, _collector);
-          if (sz > 0) {
-            prev_obj += sz;
-          } else {
-            // In this case we may end up doing a bit of redundant
-            // scanning, but that appears unavoidable, short of
-            // locking the free list locks; see bug 6324141.
-            break;
-          }
-        }
-      }
-      if (prev_obj < span.end()) {
-        MemRegion my_span = MemRegion(prev_obj, span.end());
-        // Do the marking work within a non-empty span --
-        // the last argument to the constructor indicates whether the
-        // iteration should be incremental with periodic yields.
-        ParMarkFromRootsClosure cl(this, _collector, my_span,
-                                   &_collector->_markBitMap,
-                                   work_queue(i),
-                                   &_collector->_markStack);
-        _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
-      } // else nothing to do for this task
-    }   // else nothing to do for this task
-  }
-  // We'd be tempted to assert here that since there are no
-  // more tasks left to claim in this space, the global_finger
-  // must exceed space->top() and a fortiori space->end(). However,
-  // that would not quite be correct because the bumping of
-  // global_finger occurs strictly after the claiming of a task,
-  // so by the time we reach here the global finger may not yet
-  // have been bumped up by the thread that claimed the last
-  // task.
-  pst->all_tasks_completed();
-}
-
-class ParConcMarkingClosure: public MetadataVisitingOopIterateClosure {
- private:
-  CMSCollector* _collector;
-  CMSConcMarkingTask* _task;
-  MemRegion     _span;
-  CMSBitMap*    _bit_map;
-  CMSMarkStack* _overflow_stack;
-  OopTaskQueue* _work_queue;
- protected:
-  DO_OOP_WORK_DEFN
- public:
-  ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
-                        CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
-    MetadataVisitingOopIterateClosure(collector->ref_processor()),
-    _collector(collector),
-    _task(task),
-    _span(collector->_span),
-    _bit_map(bit_map),
-    _overflow_stack(overflow_stack),
-    _work_queue(work_queue)
-  { }
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-
-  void trim_queue(size_t max);
-  void handle_stack_overflow(HeapWord* lost);
-  void do_yield_check() {
-    if (_task->should_yield()) {
-      _task->yield();
-    }
-  }
-};
-
-DO_OOP_WORK_IMPL(ParConcMarkingClosure)
-
-// Grey object scanning during work stealing phase --
-// the salient assumption here is that any references
-// that are in these stolen objects being scanned must
-// already have been initialized (else they would not have
-// been published), so we do not need to check for
-// uninitialized objects before pushing here.
-void ParConcMarkingClosure::do_oop(oop obj) {
-  assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
-  HeapWord* addr = (HeapWord*)obj;
-  // Check if oop points into the CMS generation
-  // and is not marked
-  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // a white object ...
-    // If we manage to "claim" the object, by being the
-    // first thread to mark it, then we push it on our
-    // marking stack
-    if (_bit_map->par_mark(addr)) {     // ... now grey
-      // push on work queue (grey set)
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow ||
-          !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
-        // stack overflow
-        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
-        // We cannot assert that the overflow stack is full because
-        // it may have been emptied since.
-        assert(simulate_overflow ||
-               _work_queue->size() == _work_queue->max_elems(),
-              "Else push should have succeeded");
-        handle_stack_overflow(addr);
-      }
-    } // Else, some other thread got there first
-    do_yield_check();
-  }
-}
-
-void ParConcMarkingClosure::trim_queue(size_t max) {
-  while (_work_queue->size() > max) {
-    oop new_oop;
-    if (_work_queue->pop_local(new_oop)) {
-      assert(oopDesc::is_oop(new_oop), "Should be an oop");
-      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
-      assert(_span.contains((HeapWord*)new_oop), "Not in span");
-      new_oop->oop_iterate(this);  // do_oop() above
-      do_yield_check();
-    }
-  }
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_addr.
-void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
-  // We need to do this under a mutex to prevent other
-  // workers from interfering with the work done below.
-  MutexLocker ml(_overflow_stack->par_lock(),
-                 Mutex::_no_safepoint_check_flag);
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _overflow_stack->reset();  // discard stack contents
-  _overflow_stack->expand(); // expand the stack if possible
-}
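-
-// (The address recorded above is consumed by do_marking_mt() and
-// do_marking_st() below, which restart a fresh marking iteration from
-// _restart_addr once the current pass completes.)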
-
-
-void CMSConcMarkingTask::do_work_steal(int i) {
-  OopTaskQueue* work_q = work_queue(i);
-  oop obj_to_scan;
-  CMSBitMap* bm = &(_collector->_markBitMap);
-  CMSMarkStack* ovflw = &(_collector->_markStack);
-  ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
-  while (true) {
-    cl.trim_queue(0);
-    assert(work_q->size() == 0, "Should have been emptied above");
-    if (get_work_from_overflow_stack(ovflw, work_q)) {
-      // Can't assert below because the work obtained from the
-      // overflow stack may already have been stolen from us.
-      // assert(work_q->size() > 0, "Work from overflow stack");
-      continue;
-    } else if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
-      assert(oopDesc::is_oop(obj_to_scan), "Should be an oop");
-      assert(bm->isMarked((HeapWord*)obj_to_scan), "Grey object");
-      obj_to_scan->oop_iterate(&cl);
-    } else if (terminator()->offer_termination(&_term_term)) {
-      assert(work_q->size() == 0, "Impossible!");
-      break;
-    } else if (yielding() || should_yield()) {
-      yield();
-    }
-  }
-}
-
-// This is run by the CMS (coordinator) thread.
-void CMSConcMarkingTask::coordinator_yield() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  // First give up the locks, then yield, then re-lock
-  // We should probably use a constructor/destructor idiom to
-  // do this unlock/lock or modify the MutexUnlocker class to
-  // serve our purpose. XXX
-  assert_lock_strong(_bit_map_lock);
-  _bit_map_lock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  _collector->stopTimer();
-  _collector->incrementYields();
-
-  // It is possible for whichever thread initiated the yield request
-  // not to get a chance to wake up and take the bitmap lock between
-  // this thread releasing it and reacquiring it. So, while the
-  // should_yield() flag is on, let's sleep for a bit to give the
-  // other thread a chance to wake up. The limit imposed on the number
-  // of iterations is defensive, to avoid any unforeseen circumstances
-  // putting us into an infinite loop. Since it's always been this
-  // (coordinator_yield()) method that was observed to cause the
-  // problem, we are using a parameter (CMSCoordinatorYieldSleepCount)
-  // which is by default non-zero. For the other seven methods that
-  // also perform the yield operation, we use a different parameter
-  // (CMSYieldSleepCount) which is by default zero. This way we
-  // can enable the sleeping for those methods too, if necessary.
-  // See 6442774.
-  //
-  // We really need to reconsider the synchronization between the GC
-  // thread and the yield-requesting threads in the future and we
-  // should really use wait/notify, which is the recommended
-  // way of doing this type of interaction. Additionally, we should
-  // consolidate the eight methods that do the yield operation, which
-  // are almost identical, into one for better maintainability and
-  // readability. See 6445193.
-  //
-  // Tony 2006.06.29
-  for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount &&
-                   ConcurrentMarkSweepThread::should_yield() &&
-                   !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::naked_short_sleep(1);
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _bit_map_lock->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-bool CMSCollector::do_marking_mt() {
-  assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition");
-  uint num_workers = WorkerPolicy::calc_active_conc_workers(conc_workers()->total_workers(),
-                                                            conc_workers()->active_workers(),
-                                                            Threads::number_of_non_daemon_threads());
-  num_workers = conc_workers()->update_active_workers(num_workers);
-  log_info(gc,task)("Using %u workers of %u for marking", num_workers, conc_workers()->total_workers());
-
-  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
-
-  CMSConcMarkingTask tsk(this,
-                         cms_space,
-                         conc_workers(),
-                         task_queues());
-
-  // Since the actual number of workers we get may be different
-  // from the number we requested above, do we need to do anything different
-  // below? In particular, maybe we need to subclass the SequentialSubTasksDone
-  // class?? XXX
-  cms_space->initialize_sequential_subtasks_for_marking(num_workers);
-
-  // Refs discovery is already non-atomic.
-  assert(!ref_processor()->discovery_is_atomic(), "Should be non-atomic");
-  assert(ref_processor()->discovery_is_mt(), "Discovery should be MT");
-  conc_workers()->start_task(&tsk);
-  while (tsk.yielded()) {
-    tsk.coordinator_yield();
-    conc_workers()->continue_task(&tsk);
-  }
-  // If the task was aborted, _restart_addr will be non-NULL
-  assert(tsk.completed() || _restart_addr != NULL, "Inconsistency");
-  while (_restart_addr != NULL) {
-    // XXX For now we do not make use of ABORTED state and have not
-    // yet implemented the right abort semantics (even in the original
-    // single-threaded CMS case). That needs some more investigation
-    // and is deferred for now; see CR# TBF. 07252005YSR. XXX
-    assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency");
-    // If _restart_addr is non-NULL, a marking stack overflow
-    // occurred; we need to do a fresh marking iteration from the
-    // indicated restart address.
-    if (_foregroundGCIsActive) {
-      // We may be running into repeated stack overflows, having
-      // reached the limit of the stack size, while making very
-      // slow forward progress. It may be best to bail out and
-      // let the foreground collector do its job.
-      // Clear _restart_addr, so that foreground GC
-      // works from scratch. This avoids the headache of
-      // a "rescan" which would otherwise be needed because
-      // of the dirty mod union table & card table.
-      _restart_addr = NULL;
-      return false;
-    }
-    // Adjust the task to restart from _restart_addr
-    tsk.reset(_restart_addr);
-    cms_space->initialize_sequential_subtasks_for_marking(num_workers,
-                  _restart_addr);
-    _restart_addr = NULL;
-    // Get the workers going again
-    conc_workers()->start_task(&tsk);
-    while (tsk.yielded()) {
-      tsk.coordinator_yield();
-      conc_workers()->continue_task(&tsk);
-    }
-  }
-  assert(tsk.completed(), "Inconsistency");
-  assert(tsk.result() == true, "Inconsistency");
-  return true;
-}
-
-bool CMSCollector::do_marking_st() {
-  ResourceMark rm;
-  HandleMark   hm;
-
-  // Temporarily make refs discovery single threaded (non-MT)
-  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
-  MarkFromRootsClosure markFromRootsClosure(this, _span, &_markBitMap,
-    &_markStack, CMSYield);
-  // the last argument to iterate indicates whether the iteration
-  // should be incremental with periodic yields.
-  _markBitMap.iterate(&markFromRootsClosure);
-  // If _restart_addr is non-NULL, a marking stack overflow
-  // occurred; we need to do a fresh iteration from the
-  // indicated restart address.
-  while (_restart_addr != NULL) {
-    if (_foregroundGCIsActive) {
-      // We may be running into repeated stack overflows, having
-      // reached the limit of the stack size, while making very
-      // slow forward progress. It may be best to bail out and
-      // let the foreground collector do its job.
-      // Clear _restart_addr, so that foreground GC
-      // works from scratch. This avoids the headache of
-      // a "rescan" which would otherwise be needed because
-      // of the dirty mod union table & card table.
-      _restart_addr = NULL;
-      return false;  // indicating failure to complete marking
-    }
-    // Deal with stack overflow:
-    // we restart marking from _restart_addr
-    HeapWord* ra = _restart_addr;
-    markFromRootsClosure.reset(ra);
-    _restart_addr = NULL;
-    _markBitMap.iterate(&markFromRootsClosure, ra, _span.end());
-  }
-  return true;
-}
-
-void CMSCollector::preclean() {
-  check_correct_thread_executing();
-  assert(Thread::current()->is_ConcurrentGC_thread(), "Wrong thread");
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-  _abort_preclean = false;
-  if (CMSPrecleaningEnabled) {
-    if (!CMSEdenChunksRecordAlways) {
-      _eden_chunk_index = 0;
-    }
-    size_t used = get_eden_used();
-    size_t capacity = get_eden_capacity();
-    // Don't start sampling unless we will get sufficiently
-    // many samples.
-    if (used < (((capacity / CMSScheduleRemarkSamplingRatio) / 100)
-                * CMSScheduleRemarkEdenPenetration)) {
-      _start_sampling = true;
-    } else {
-      _start_sampling = false;
-    }
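-    // For example (hypothetical values): with eden capacity == 100M,
-    // CMSScheduleRemarkSamplingRatio == 5 and
-    // CMSScheduleRemarkEdenPenetration == 50, sampling starts only while
-    // used < ((100M / 5) / 100) * 50 == 10M, i.e. while eden is still
-    // early enough in its lifetime to yield sufficiently many samples.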
-    GCTraceCPUTime tcpu;
-    CMSPhaseAccounting pa(this, "Concurrent Preclean");
-    preclean_work(CMSPrecleanRefLists1, CMSPrecleanSurvivors1);
-  }
-  CMSTokenSync x(true); // is cms thread
-  if (CMSPrecleaningEnabled) {
-    sample_eden();
-    _collectorState = AbortablePreclean;
-  } else {
-    _collectorState = FinalMarking;
-  }
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-}
-
-// Try to schedule the remark such that young gen
-// occupancy is CMSScheduleRemarkEdenPenetration %.
-void CMSCollector::abortable_preclean() {
-  check_correct_thread_executing();
-  assert(CMSPrecleaningEnabled,  "Inconsistent control state");
-  assert(_collectorState == AbortablePreclean, "Inconsistent control state");
-
-  // If Eden's current occupancy is below this threshold,
-  // immediately schedule the remark; else preclean
-  // past the next scavenge in an effort to
-  // schedule the pause as described above. By choosing
-  // CMSScheduleRemarkEdenSizeThreshold >= max eden size
-  // we will never do an actual abortable preclean cycle.
-  if (get_eden_used() > CMSScheduleRemarkEdenSizeThreshold) {
-    GCTraceCPUTime tcpu;
-    CMSPhaseAccounting pa(this, "Concurrent Abortable Preclean");
-    // We need more smarts in the abortable preclean
-    // loop below to deal with cases where allocation
-    // in young gen is very very slow, and our precleaning
-    // is running a losing race against a horde of
-    // mutators intent on flooding us with CMS updates
-    // (dirty cards).
-    // One, admittedly dumb, strategy is to give up
-    // after a certain number of abortable precleaning loops
-    // or after a certain maximum time. We want to make
-    // this smarter in the next iteration.
-    // XXX FIX ME!!! YSR
-    size_t loops = 0, workdone = 0, cumworkdone = 0, waited = 0;
-    while (!(should_abort_preclean() ||
-             ConcurrentMarkSweepThread::cmst()->should_terminate())) {
-      workdone = preclean_work(CMSPrecleanRefLists2, CMSPrecleanSurvivors2);
-      cumworkdone += workdone;
-      loops++;
-      // Voluntarily terminate abortable preclean phase if we have
-      // been at it for too long.
-      if ((CMSMaxAbortablePrecleanLoops != 0) &&
-          loops >= CMSMaxAbortablePrecleanLoops) {
-        log_debug(gc)(" CMS: abort preclean due to loops ");
-        break;
-      }
-      if (pa.wallclock_millis() > CMSMaxAbortablePrecleanTime) {
-        log_debug(gc)(" CMS: abort preclean due to time ");
-        break;
-      }
-      // If we are doing little work each iteration, we should
-      // take a short break.
-      if (workdone < CMSAbortablePrecleanMinWorkPerIteration) {
-        // Sleep for some time, waiting for work to accumulate
-        stopTimer();
-        cmsThread()->wait_on_cms_lock(CMSAbortablePrecleanWaitMillis);
-        startTimer();
-        waited++;
-      }
-    }
-    log_trace(gc)(" [" SIZE_FORMAT " iterations, " SIZE_FORMAT " waits, " SIZE_FORMAT " cards)] ",
-                               loops, waited, cumworkdone);
-  }
-  CMSTokenSync x(true); // is cms thread
-  if (_collectorState != Idling) {
-    assert(_collectorState == AbortablePreclean,
-           "Spontaneous state transition?");
-    _collectorState = FinalMarking;
-  } // Else, a foreground collection completed this CMS cycle.
-  return;
-}
-
-// Respond to an Eden sampling opportunity
-void CMSCollector::sample_eden() {
-  // Make sure a young gc cannot sneak in between our
-  // reading and recording of a sample.
-  assert(Thread::current()->is_ConcurrentGC_thread(),
-         "Only the cms thread may collect Eden samples");
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "Should collect samples while holding CMS token");
-  if (!_start_sampling) {
-    return;
-  }
-  // When CMSEdenChunksRecordAlways is true, the eden chunk array
-  // is populated by the young generation.
-  if (_eden_chunk_array != NULL && !CMSEdenChunksRecordAlways) {
-    if (_eden_chunk_index < _eden_chunk_capacity) {
-      _eden_chunk_array[_eden_chunk_index] = *_top_addr;   // take sample
-      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
-             "Unexpected state of Eden");
-      // We'd like to check that what we just sampled is an oop-start address;
-      // however, we cannot do that here since the object may not yet have been
-      // initialized. So we'll instead do the check when we _use_ this sample
-      // later.
-      if (_eden_chunk_index == 0 ||
-          (pointer_delta(_eden_chunk_array[_eden_chunk_index],
-                         _eden_chunk_array[_eden_chunk_index-1])
-           >= CMSSamplingGrain)) {
-        _eden_chunk_index++;  // commit sample
-      }
-    }
-  }
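-  // For instance (hypothetical numbers): with CMSSamplingGrain == 1024
-  // words, successive top() samples at word offsets 0, 512 and 1536
-  // would commit the first and third samples and skip (overwrite) the
-  // second, keeping committed chunk boundaries at least one grain apart.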
-  if ((_collectorState == AbortablePreclean) && !_abort_preclean) {
-    size_t used = get_eden_used();
-    size_t capacity = get_eden_capacity();
-    assert(used <= capacity, "Unexpected state of Eden");
-    if (used >  (capacity/100 * CMSScheduleRemarkEdenPenetration)) {
-      _abort_preclean = true;
-    }
-  }
-}
-
-size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
-  assert(_collectorState == Precleaning ||
-         _collectorState == AbortablePreclean, "incorrect state");
-  ResourceMark rm;
-  HandleMark   hm;
-
-  // Precleaning is currently not MT but the reference processor
-  // may be set for MT.  Disable it temporarily here.
-  ReferenceProcessor* rp = ref_processor();
-  ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(rp, false);
-
-  // Do one pass of scrubbing the discovered reference lists
-  // to remove any reference objects with strongly-reachable
-  // referents.
-  if (clean_refs) {
-    CMSPrecleanRefsYieldClosure yield_cl(this);
-    assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
-    CMSKeepAliveClosure keep_alive(this, _span, &_markBitMap,
-                                   &_markStack, true /* preclean */);
-    CMSDrainMarkingStackClosure complete_trace(this,
-                                   _span, &_markBitMap, &_markStack,
-                                   &keep_alive, true /* preclean */);
-
-    // We don't want this step to interfere with a young
-    // collection because we don't want to take CPU
-    // or memory bandwidth away from the young GC threads
-    // (which may be as many as there are CPUs).
-    // Note that we don't need to protect ourselves from
-    // interference with mutators because they can't
-    // manipulate the discovered reference lists nor affect
-    // the computed reachability of the referents, the
-    // only properties manipulated by the precleaning
-    // of these reference lists.
-    stopTimer();
-    CMSTokenSyncWithLocks x(true /* is cms thread */,
-                            bitMapLock());
-    startTimer();
-    sample_eden();
-
-    // The following will yield to allow foreground
-    // collection to proceed promptly. XXX YSR:
-    // The code in this method may need further
-    // tweaking for better performance and some restructuring
-    // for cleaner interfaces.
-    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
-    rp->preclean_discovered_references(
-          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
-          gc_timer);
-  }
-
-  if (clean_survivor) {  // preclean the active survivor space(s)
-    PushAndMarkClosure pam_cl(this, _span, ref_processor(),
-                             &_markBitMap, &_modUnionTable,
-                             &_markStack, true /* precleaning phase */);
-    stopTimer();
-    CMSTokenSyncWithLocks ts(true /* is cms thread */,
-                             bitMapLock());
-    startTimer();
-    unsigned int before_count =
-      CMSHeap::heap()->total_collections();
-    SurvivorSpacePrecleanClosure
-      sss_cl(this, _span, &_markBitMap, &_markStack,
-             &pam_cl, before_count, CMSYield);
-    _young_gen->from()->object_iterate_careful(&sss_cl);
-    _young_gen->to()->object_iterate_careful(&sss_cl);
-  }
-  MarkRefsIntoAndScanClosure
-    mrias_cl(_span, ref_processor(), &_markBitMap, &_modUnionTable,
-             &_markStack, this, CMSYield,
-             true /* precleaning phase */);
-  // CAUTION: The following closure has persistent state that may need to
-  // be reset if the sequence of addresses it processes is not
-  // monotonically increasing.
-  ScanMarkedObjectsAgainCarefullyClosure
-    smoac_cl(this, _span,
-      &_markBitMap, &_markStack, &mrias_cl, CMSYield);
-
-  // Preclean dirty cards in ModUnionTable and CardTable using
-  // appropriate convergence criterion;
-  // repeat CMSPrecleanIter times unless we find that
-  // we are losing.
-  assert(CMSPrecleanIter < 10, "CMSPrecleanIter is too large");
-  assert(CMSPrecleanNumerator < CMSPrecleanDenominator,
-         "Bad convergence multiplier");
-  assert(CMSPrecleanThreshold >= 100,
-         "Unreasonably low CMSPrecleanThreshold");
-
-  size_t numIter, cumNumCards, lastNumCards, curNumCards;
-  for (numIter = 0, cumNumCards = lastNumCards = curNumCards = 0;
-       numIter < CMSPrecleanIter;
-       numIter++, lastNumCards = curNumCards, cumNumCards += curNumCards) {
-    curNumCards  = preclean_mod_union_table(_cmsGen, &smoac_cl);
-    log_trace(gc)(" (modUnionTable: " SIZE_FORMAT " cards)", curNumCards);
-    // Either there are very few dirty cards, so re-mark
-    // pause will be small anyway, or our pre-cleaning isn't
-    // that much faster than the rate at which cards are being
-    // dirtied, so we might as well stop and re-mark since
-    // precleaning won't improve our re-mark time by much.
-    if (curNumCards <= CMSPrecleanThreshold ||
-        (numIter > 0 &&
-         (curNumCards * CMSPrecleanDenominator >
-         lastNumCards * CMSPrecleanNumerator))) {
-      numIter++;
-      cumNumCards += curNumCards;
-      break;
-    }
-  }
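-  // For example, with the (hypothetical) values CMSPrecleanNumerator == 2
-  // and CMSPrecleanDenominator == 3, the loop above continues only while
-  // curNumCards * 3 <= lastNumCards * 2, i.e. while each pass reduces the
-  // dirty-card count to at most two thirds of the previous pass (and the
-  // count stays above CMSPrecleanThreshold).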
-
-  preclean_cld(&mrias_cl, _cmsGen->freelistLock());
-
-  curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
-  cumNumCards += curNumCards;
-  log_trace(gc)(" (cardTable: " SIZE_FORMAT " cards, re-scanned " SIZE_FORMAT " cards, " SIZE_FORMAT " iterations)",
-                             curNumCards, cumNumCards, numIter);
-  return cumNumCards;   // as a measure of useful work done
-}
-
-// PRECLEANING NOTES:
-// Precleaning involves:
-// . reading the bits of the modUnionTable and clearing the set bits.
-// . For the cards corresponding to the set bits, we scan the
-//   objects on those cards. This means we need the free_list_lock
-//   so that we can safely iterate over the CMS space when scanning
-//   for oops.
-// . When we scan the objects, we'll be both reading and setting
-//   marks in the marking bit map, so we'll need the marking bit map.
-// . For protecting _collector_state transitions, we take the CGC_lock.
-//   Note that any races in the reading of card table entries by the
-//   CMS thread on the one hand and the clearing of those entries by the
-//   VM thread or the setting of those entries by the mutator threads on the
-//   other are quite benign. However, for efficiency it makes sense to keep
-//   the VM thread from racing with the CMS thread while the latter is
-//   transferring dirty card info to the modUnionTable. We therefore also
-//   use the CGC_lock to protect the reading of the card table and the mod
-//   union table by the CMS thread.
-// . We run concurrently with mutator updates, so scanning
-//   needs to be done carefully  -- we should not try to scan
-//   potentially uninitialized objects.
-//
-// Locking strategy: While holding the CGC_lock, we scan over and
-// reset a maximal dirty range of the mod union / card tables, then lock
-// the free_list_lock and bitmap lock to do a full marking, then
-// release these locks; and repeat the cycle. This allows for a
-// certain amount of fairness in the sharing of these locks between
-// the CMS collector on the one hand, and the VM thread and the
-// mutators on the other.
-
-// NOTE: preclean_mod_union_table() and preclean_card_table()
-// further below are largely identical; if you need to modify
-// one of these methods, please check the other method too.
-
-size_t CMSCollector::preclean_mod_union_table(
-  ConcurrentMarkSweepGeneration* old_gen,
-  ScanMarkedObjectsAgainCarefullyClosure* cl) {
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-
-  // strategy: starting with the first card, accumulate contiguous
-  // ranges of dirty cards; clear these cards, then scan the region
-  // covered by these cards.
-
-  // Since all of the MUT is committed ahead, we can just use
-  // that, in case the generations expand while we are precleaning.
-  // It might also be fine to just use the committed part of the
-  // generation, but we might potentially miss cards when the
-  // generation is rapidly expanding while we are in the midst
-  // of precleaning.
-  HeapWord* startAddr = old_gen->reserved().start();
-  HeapWord* endAddr   = old_gen->reserved().end();
-
-  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
-
-  size_t numDirtyCards, cumNumDirtyCards;
-  HeapWord *nextAddr, *lastAddr;
-  for (cumNumDirtyCards = numDirtyCards = 0,
-       nextAddr = lastAddr = startAddr;
-       nextAddr < endAddr;
-       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
-
-    ResourceMark rm;
-    HandleMark   hm;
-
-    MemRegion dirtyRegion;
-    {
-      stopTimer();
-      // Potential yield point
-      CMSTokenSync ts(true);
-      startTimer();
-      sample_eden();
-      // Get dirty region starting at nextAddr (inclusive),
-      // simultaneously clearing it.
-      dirtyRegion =
-        _modUnionTable.getAndClearMarkedRegion(nextAddr, endAddr);
-      assert(dirtyRegion.start() >= nextAddr,
-             "returned region inconsistent?");
-    }
-    // Remember where the next search should begin.
-    // The returned region (if non-empty) is a right open interval,
-    // so lastAddr is obtained from the right end of that
-    // interval.
-    lastAddr = dirtyRegion.end();
-    // Should do something more transparent and less hacky XXX
-    numDirtyCards =
-      _modUnionTable.heapWordDiffToOffsetDiff(dirtyRegion.word_size());
-
-    // We'll scan the cards in the dirty region (with periodic
-    // yields for foreground GC as needed).
-    if (!dirtyRegion.is_empty()) {
-      assert(numDirtyCards > 0, "consistency check");
-      HeapWord* stop_point = NULL;
-      stopTimer();
-      // Potential yield point
-      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(),
-                               bitMapLock());
-      startTimer();
-      {
-        verify_work_stacks_empty();
-        verify_overflow_empty();
-        sample_eden();
-        stop_point =
-          old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
-      }
-      if (stop_point != NULL) {
-        // The careful iteration stopped early either because it found an
-        // uninitialized object, or because we were in the midst of an
-        // "abortable preclean", which should now be aborted. Redirty
-        // the bits corresponding to the partially-scanned or unscanned
-        // cards. We'll either restart at the next block boundary or
-        // abort the preclean.
-        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
-               "Should only be AbortablePreclean.");
-        _modUnionTable.mark_range(MemRegion(stop_point, dirtyRegion.end()));
-        if (should_abort_preclean()) {
-          break; // out of preclean loop
-        } else {
-          // Compute the next address at which preclean should pick up;
-          // might need bitMapLock in order to read P-bits.
-          lastAddr = next_card_start_after_block(stop_point);
-        }
-      }
-    } else {
-      assert(lastAddr == endAddr, "consistency check");
-      assert(numDirtyCards == 0, "consistency check");
-      break;
-    }
-  }
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-  return cumNumDirtyCards;
-}
-
-// NOTE: preclean_mod_union_table() above and preclean_card_table()
-// below are largely identical; if you need to modify
-// one of these methods, please check the other method too.
-
-size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
-  ScanMarkedObjectsAgainCarefullyClosure* cl) {
-  // strategy: it's similar to preclean_mod_union_table() above, in that
-  // we accumulate contiguous ranges of dirty cards, mark these cards
-  // precleaned, then scan the region covered by these cards.
-  HeapWord* endAddr   = (HeapWord*)(old_gen->_virtual_space.high());
-  HeapWord* startAddr = (HeapWord*)(old_gen->_virtual_space.low());
-
-  cl->setFreelistLock(old_gen->freelistLock());   // needed for yielding
-
-  size_t numDirtyCards, cumNumDirtyCards;
-  HeapWord *lastAddr, *nextAddr;
-
-  for (cumNumDirtyCards = numDirtyCards = 0,
-       nextAddr = lastAddr = startAddr;
-       nextAddr < endAddr;
-       nextAddr = lastAddr, cumNumDirtyCards += numDirtyCards) {
-
-    ResourceMark rm;
-    HandleMark   hm;
-
-    MemRegion dirtyRegion;
-    {
-      // See comments in "Precleaning notes" above on why we
-      // do this locking. XXX Could the locking overheads be
-      // too high when dirty cards are sparse? [I don't think so.]
-      stopTimer();
-      CMSTokenSync x(true); // is cms thread
-      startTimer();
-      sample_eden();
-      // Get and clear dirty region from card table
-      dirtyRegion = _ct->dirty_card_range_after_reset(MemRegion(nextAddr, endAddr),
-                                                      true,
-                                                      CardTable::precleaned_card_val());
-
-      assert(dirtyRegion.start() >= nextAddr,
-             "returned region inconsistent?");
-    }
-    lastAddr = dirtyRegion.end();
-    numDirtyCards =
-      dirtyRegion.word_size()/CardTable::card_size_in_words;
-
-    if (!dirtyRegion.is_empty()) {
-      stopTimer();
-      CMSTokenSyncWithLocks ts(true, old_gen->freelistLock(), bitMapLock());
-      startTimer();
-      sample_eden();
-      verify_work_stacks_empty();
-      verify_overflow_empty();
-      HeapWord* stop_point =
-        old_gen->cmsSpace()->object_iterate_careful_m(dirtyRegion, cl);
-      if (stop_point != NULL) {
-        assert((_collectorState == AbortablePreclean && should_abort_preclean()),
-               "Should only be AbortablePreclean.");
-        _ct->invalidate(MemRegion(stop_point, dirtyRegion.end()));
-        if (should_abort_preclean()) {
-          break; // out of preclean loop
-        } else {
-          // Compute the next address at which preclean should pick up.
-          lastAddr = next_card_start_after_block(stop_point);
-        }
-      }
-    } else {
-      break;
-    }
-  }
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-  return cumNumDirtyCards;
-}
-
-class PrecleanCLDClosure : public CLDClosure {
-  MetadataVisitingOopsInGenClosure* _cm_closure;
- public:
-  PrecleanCLDClosure(MetadataVisitingOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
-  void do_cld(ClassLoaderData* cld) {
-    if (cld->has_accumulated_modified_oops()) {
-      cld->clear_accumulated_modified_oops();
-
-      _cm_closure->do_cld(cld);
-    }
-  }
-};
-
-// The freelist lock is needed to prevent asserts; is it really needed?
-void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
-  // Needed to walk CLDG
-  MutexLocker ml(ClassLoaderDataGraph_lock);
-
-  cl->set_freelistLock(freelistLock);
-
-  CMSTokenSyncWithLocks ts(true, freelistLock, bitMapLock());
-
-  // SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
-  // SSS: We should probably check if precleaning should be aborted, at suitable intervals?
-  PrecleanCLDClosure preclean_closure(cl);
-  ClassLoaderDataGraph::cld_do(&preclean_closure);
-
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-}
-
-void CMSCollector::checkpointRootsFinal() {
-  assert(_collectorState == FinalMarking, "incorrect state transition?");
-  check_correct_thread_executing();
-  // world is stopped at this checkpoint
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world should be stopped");
-  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
-
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-
-  log_debug(gc)("YG occupancy: " SIZE_FORMAT " K (" SIZE_FORMAT " K)",
-                _young_gen->used() / K, _young_gen->capacity() / K);
-  {
-    if (CMSScavengeBeforeRemark) {
-      CMSHeap* heap = CMSHeap::heap();
-      // Temporarily set the flag to false; GCH->do_collection() expects
-      // it to be false and will set it back to true
-      FlagSetting fl(heap->_is_gc_active, false);
-
-      heap->do_collection(true,                      // full (i.e. force, see below)
-                          false,                     // !clear_all_soft_refs
-                          0,                         // size
-                          false,                     // is_tlab
-                          GenCollectedHeap::YoungGen // type
-        );
-    }
-    FreelistLocker x(this);
-    MutexLocker y(bitMapLock(),
-                  Mutex::_no_safepoint_check_flag);
-    checkpointRootsFinalWork();
-    _cmsGen->cmsSpace()->recalculate_used_stable();
-  }
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-}
-
-void CMSCollector::checkpointRootsFinalWork() {
-  GCTraceTime(Trace, gc, phases) tm("checkpointRootsFinalWork", _gc_timer_cm);
-
-  assert(haveFreelistLocks(), "must have free list locks");
-  assert_lock_strong(bitMapLock());
-
-  ResourceMark rm;
-  HandleMark   hm;
-
-  CMSHeap* heap = CMSHeap::heap();
-
-  assert(haveFreelistLocks(), "must have free list locks");
-  assert_lock_strong(bitMapLock());
-
-  // We might assume that we need not fill TLAB's when
-  // CMSScavengeBeforeRemark is set, because we may have just done
-  // a scavenge which would have filled all TLAB's -- and besides
-  // Eden would be empty. This however may not always be the case --
-  // for instance although we asked for a scavenge, it may not have
-  // happened because of a JNI critical section. We probably need
-  // a policy for deciding whether we can in that case wait until
-  // the critical section releases and then do the remark following
-  // the scavenge, and skip it here. In the absence of that policy,
-  // or of an indication of whether the scavenge did indeed occur,
-  // we cannot rely on TLAB's having been filled and must do
-  // so here just in case a scavenge did not happen.
-  heap->ensure_parsability(false);  // fill TLAB's, but no need to retire them
-  // Update the saved marks which may affect the root scans.
-  heap->save_marks();
-
-  print_eden_and_survivor_chunk_arrays();
-
-  {
-#if COMPILER2_OR_JVMCI
-    DerivedPointerTableDeactivate dpt_deact;
-#endif
-
-    // Note on the role of the mod union table:
-    // Since the marker in "markFromRoots" marks concurrently with
-    // mutators, it is possible for some reachable objects not to have been
-    // scanned. For instance, the only reference to an object A was
-    // placed in object B after the marker scanned B. Unless B is rescanned,
-    // A would be collected. Such updates to references in marked objects
-    // are detected via the mod union table which is the set of all cards
-    // dirtied since the first checkpoint in this GC cycle and prior to
-    // the most recent young generation GC, minus those cleaned up by the
-    // concurrent precleaning.
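-    // A concrete timeline for such an update (sketch):
-    //   t0: concurrent marker scans B; B holds no reference to A yet
-    //   t1: mutator stores the only reference to A into a field of B;
-    //       the corresponding mod union card becomes dirty
-    //   t2: remark rescans objects on dirty mod union cards, re-scans B,
-    //       and thereby marks A live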
-    if (CMSParallelRemarkEnabled) {
-      GCTraceTime(Debug, gc, phases) t("Rescan (parallel)", _gc_timer_cm);
-      do_remark_parallel();
-    } else {
-      GCTraceTime(Debug, gc, phases) t("Rescan (non-parallel)", _gc_timer_cm);
-      do_remark_non_parallel();
-    }
-  }
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-
-  {
-    GCTraceTime(Trace, gc, phases) ts("refProcessingWork", _gc_timer_cm);
-    refProcessingWork();
-  }
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-
-  if (should_unload_classes()) {
-    heap->prune_scavengable_nmethods();
-  }
-
-  // If we encountered any (marking stack / work queue) overflow
-  // events during the current CMS cycle, take appropriate
-  // remedial measures, where possible, so as to try and avoid
-  // recurrence of that condition.
-  assert(_markStack.isEmpty(), "No grey objects");
-  size_t ser_ovflw = _ser_pmc_remark_ovflw + _ser_pmc_preclean_ovflw +
-                     _ser_kac_ovflw        + _ser_kac_preclean_ovflw;
-  if (ser_ovflw > 0) {
-    log_trace(gc)("Marking stack overflow (benign) (pmc_pc=" SIZE_FORMAT ", pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ", kac_preclean=" SIZE_FORMAT ")",
-                         _ser_pmc_preclean_ovflw, _ser_pmc_remark_ovflw, _ser_kac_ovflw, _ser_kac_preclean_ovflw);
-    _markStack.expand();
-    _ser_pmc_remark_ovflw = 0;
-    _ser_pmc_preclean_ovflw = 0;
-    _ser_kac_preclean_ovflw = 0;
-    _ser_kac_ovflw = 0;
-  }
-  if (_par_pmc_remark_ovflw > 0 || _par_kac_ovflw > 0) {
-    log_trace(gc)("Work queue overflow (benign) (pmc_rm=" SIZE_FORMAT ", kac=" SIZE_FORMAT ")",
-                  _par_pmc_remark_ovflw, _par_kac_ovflw);
-    _par_pmc_remark_ovflw = 0;
-    _par_kac_ovflw = 0;
-  }
-  if (_markStack._hit_limit > 0) {
-    log_trace(gc)(" (benign) Hit max stack size limit (" SIZE_FORMAT ")",
-                  _markStack._hit_limit);
-  }
-  if (_markStack._failed_double > 0) {
-    log_trace(gc)(" (benign) Failed stack doubling (" SIZE_FORMAT "), current capacity " SIZE_FORMAT,
-                  _markStack._failed_double, _markStack.capacity());
-  }
-  _markStack._hit_limit = 0;
-  _markStack._failed_double = 0;
-
-  if ((VerifyAfterGC || VerifyDuringGC) &&
-      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    verify_after_remark();
-  }
-
-  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
-
-  // Change under the freelistLocks.
-  _collectorState = Sweeping;
-  // Call isAllClear() under bitMapLock
-  assert(_modUnionTable.isAllClear(),
-      "Should be clear by end of the final marking");
-  assert(_ct->cld_rem_set()->mod_union_is_clear(),
-      "Should be clear by end of the final marking");
-}
-
-void CMSParInitialMarkTask::work(uint worker_id) {
-  elapsedTimer _timer;
-  ResourceMark rm;
-  HandleMark   hm;
-
-  // ---------- scan from roots --------------
-  _timer.start();
-  CMSHeap* heap = CMSHeap::heap();
-  ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));
-
-  // ---------- young gen roots --------------
-  {
-    work_on_young_gen_roots(&par_mri_cl);
-    _timer.stop();
-    log_trace(gc, task)("Finished young gen initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-  }
-
-  // ---------- remaining roots --------------
-  _timer.reset();
-  _timer.start();
-
-  CLDToOopClosure cld_closure(&par_mri_cl, ClassLoaderData::_claim_strong);
-
-  heap->cms_process_roots(_strong_roots_scope,
-                          false,     // yg was scanned above
-                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                          _collector->should_unload_classes(),
-                          &par_mri_cl,
-                          &cld_closure);
-
-  assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
-         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
-  _timer.stop();
-  log_trace(gc, task)("Finished remaining root initial mark scan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-}
-
-// Parallel remark task
-class CMSParRemarkTask: public CMSParMarkTask {
-  CompactibleFreeListSpace* _cms_space;
-
-  // The per-thread work queues, available here for stealing.
-  OopTaskQueueSet*       _task_queues;
-  TaskTerminator         _term;
-  StrongRootsScope*      _strong_roots_scope;
-
- public:
-  // A value of 0 passed to n_workers will cause the number of
-  // workers to be taken from the active workers in the work gang.
-  CMSParRemarkTask(CMSCollector* collector,
-                   CompactibleFreeListSpace* cms_space,
-                   uint n_workers, WorkGang* workers,
-                   OopTaskQueueSet* task_queues,
-                   StrongRootsScope* strong_roots_scope):
-    CMSParMarkTask("Rescan roots and grey objects in parallel",
-                   collector, n_workers),
-    _cms_space(cms_space),
-    _task_queues(task_queues),
-    _term(n_workers, task_queues),
-    _strong_roots_scope(strong_roots_scope) { }
-
-  OopTaskQueueSet* task_queues() { return _task_queues; }
-
-  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
-  ParallelTaskTerminator* terminator() { return _term.terminator(); }
-  uint n_workers() { return _n_workers; }
-
-  void work(uint worker_id);
-
- private:
-  // ... of dirty cards in old space
-  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
-                                  ParMarkRefsIntoAndScanClosure* cl);
-
-  // ... work stealing for the above
-  void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl);
-};
-
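-// Summary of the decision made in do_cld() below (a sketch):
-//   accumulated-modified flag set -> clear the flag, then rescan the CLD
-//   only current modified oops    -> rescan the CLD, but leave the flag
-//                                    for the next young collection
-//   neither                       -> nothing to do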
-class RemarkCLDClosure : public CLDClosure {
-  CLDToOopClosure _cm_closure;
- public:
-  RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure, ClassLoaderData::_claim_strong) {}
-  void do_cld(ClassLoaderData* cld) {
-    // Check if we have modified any oops in the CLD during the concurrent marking.
-    if (cld->has_accumulated_modified_oops()) {
-      cld->clear_accumulated_modified_oops();
-
-      // We could have transferred the current modified marks to the accumulated marks,
-      // like we do with the Card Table to Mod Union Table. But it's not really necessary.
-    } else if (cld->has_modified_oops()) {
-      // Don't clear anything, this info is needed by the next young collection.
-    } else {
-      // No modified oops in the ClassLoaderData.
-      return;
-    }
-
-    // The klass has modified fields, need to scan the klass.
-    _cm_closure.do_cld(cld);
-  }
-};
-
-void CMSParMarkTask::work_on_young_gen_roots(OopsInGenClosure* cl) {
-  ParNewGeneration* young_gen = _collector->_young_gen;
-  ContiguousSpace* eden_space = young_gen->eden();
-  ContiguousSpace* from_space = young_gen->from();
-  ContiguousSpace* to_space   = young_gen->to();
-
-  HeapWord** eca = _collector->_eden_chunk_array;
-  size_t     ect = _collector->_eden_chunk_index;
-  HeapWord** sca = _collector->_survivor_chunk_array;
-  size_t     sct = _collector->_survivor_chunk_index;
-
-  assert(ect <= _collector->_eden_chunk_capacity, "out of bounds");
-  assert(sct <= _collector->_survivor_chunk_capacity, "out of bounds");
-
-  do_young_space_rescan(cl, to_space, NULL, 0);
-  do_young_space_rescan(cl, from_space, sca, sct);
-  do_young_space_rescan(cl, eden_space, eca, ect);
-}
-
-// work_queue(i) is passed to the closure
-// ParMarkRefsIntoAndScanClosure.  The "i" parameter
-// is also passed to do_dirty_card_rescan_tasks() and to
-// do_work_steal() to select the i-th task_queue.
-
-void CMSParRemarkTask::work(uint worker_id) {
-  elapsedTimer _timer;
-  ResourceMark rm;
-  HandleMark   hm;
-
-  // ---------- rescan from roots --------------
-  _timer.start();
-  CMSHeap* heap = CMSHeap::heap();
-  ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
-    _collector->_span, _collector->ref_processor(),
-    &(_collector->_markBitMap),
-    work_queue(worker_id));
-
-  // Rescan young gen roots first since these are likely
-  // coarsely partitioned and may, on that account, constitute
-  // the critical path; thus, it's best to start off that
-  // work first.
-  // ---------- young gen roots --------------
-  {
-    work_on_young_gen_roots(&par_mrias_cl);
-    _timer.stop();
-    log_trace(gc, task)("Finished young gen rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-  }
-
-  // ---------- remaining roots --------------
-  _timer.reset();
-  _timer.start();
-  heap->cms_process_roots(_strong_roots_scope,
-                          false,     // yg was scanned above
-                          GenCollectedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
-                          _collector->should_unload_classes(),
-                          &par_mrias_cl,
-                          NULL);     // The dirty klasses will be handled below
-
-  assert(_collector->should_unload_classes()
-         || (_collector->CMSCollector::roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
-         "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
-  _timer.stop();
-  log_trace(gc, task)("Finished remaining root rescan work in %dth thread: %3.3f sec",  worker_id, _timer.seconds());
-
-  // ---------- unhandled CLD scanning ----------
-  if (worker_id == 0) { // Single threaded at the moment.
-    _timer.reset();
-    _timer.start();
-
-    // Scan all new class loader data objects and new dependencies that were
-    // introduced during concurrent marking.
-    ResourceMark rm;
-    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
-    for (int i = 0; i < array->length(); i++) {
-      Devirtualizer::do_cld(&par_mrias_cl, array->at(i));
-    }
-
-    // We don't need to keep track of new CLDs anymore.
-    ClassLoaderDataGraph::remember_new_clds(false);
-
-    _timer.stop();
-    log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-  }
-
-  // We might have added oops to ClassLoaderData::_handles during the
-  // concurrent marking phase. These oops do not always point to newly allocated objects
-  // that are guaranteed to be kept alive.  Hence,
-  // we do have to revisit the _handles block during the remark phase.
-
-  // ---------- dirty CLD scanning ----------
-  if (worker_id == 0) { // Single threaded at the moment.
-    _timer.reset();
-    _timer.start();
-
-    // Scan all classes that were dirtied during the concurrent marking phase.
-    RemarkCLDClosure remark_closure(&par_mrias_cl);
-    ClassLoaderDataGraph::cld_do(&remark_closure);
-
-    _timer.stop();
-    log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-  }
-
-
-  // ---------- rescan dirty cards ------------
-  _timer.reset();
-  _timer.start();
-
-  // Do the rescan tasks for the CMS space.
-  // "worker_id" is passed to select the task_queue for "worker_id".
-  do_dirty_card_rescan_tasks(_cms_space, worker_id, &par_mrias_cl);
-  _timer.stop();
-  log_trace(gc, task)("Finished dirty card rescan work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-
-  // ---------- steal work from other threads ...
-  // ---------- ... and drain overflow list.
-  _timer.reset();
-  _timer.start();
-  do_work_steal(worker_id, &par_mrias_cl);
-  _timer.stop();
-  log_trace(gc, task)("Finished work stealing in %dth thread: %3.3f sec", worker_id, _timer.seconds());
-}
-
-void
-CMSParMarkTask::do_young_space_rescan(
-  OopsInGenClosure* cl, ContiguousSpace* space,
-  HeapWord** chunk_array, size_t chunk_top) {
-  // Until all tasks completed:
-  // . claim an unclaimed task
-  // . compute region boundaries corresponding to task claimed
-  //   using chunk_array
-  // . par_oop_iterate(cl) over that region
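-  //
-  // For illustration (a sketch): with chunk_top == 3 the claimed tasks
-  // map to the following regions:
-  //   task 0: [bottom,         chunk_array[0])
-  //   task 1: [chunk_array[0], chunk_array[1])
-  //   task 2: [chunk_array[1], chunk_array[2])
-  //   task 3: [chunk_array[2], top]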
-
-  ResourceMark rm;
-  HandleMark   hm;
-
-  SequentialSubTasksDone* pst = space->par_seq_tasks();
-
-  uint nth_task = 0;
-  uint n_tasks  = pst->n_tasks();
-
-  if (n_tasks > 0) {
-    assert(pst->valid(), "Uninitialized use?");
-    HeapWord *start, *end;
-    while (pst->try_claim_task(/* reference */ nth_task)) {
-      // We claimed task # nth_task; compute its boundaries.
-      if (chunk_top == 0) {  // no samples were taken
-        assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
-        start = space->bottom();
-        end   = space->top();
-      } else if (nth_task == 0) {
-        start = space->bottom();
-        end   = chunk_array[nth_task];
-      } else if (nth_task < (uint)chunk_top) {
-        assert(nth_task >= 1, "Control point invariant");
-        start = chunk_array[nth_task - 1];
-        end   = chunk_array[nth_task];
-      } else {
-        assert(nth_task == (uint)chunk_top, "Control point invariant");
-        start = chunk_array[chunk_top - 1];
-        end   = space->top();
-      }
-      MemRegion mr(start, end);
-      // Verify that mr is in space
-      assert(mr.is_empty() || space->used_region().contains(mr),
-             "Should be in space");
-      // Verify that "start" is an object boundary
-      assert(mr.is_empty() || oopDesc::is_oop(oop(mr.start())),
-             "Should be an oop");
-      space->par_oop_iterate(mr, cl);
-    }
-    pst->all_tasks_completed();
-  }
-}
-
-void
-CMSParRemarkTask::do_dirty_card_rescan_tasks(
-  CompactibleFreeListSpace* sp, int i,
-  ParMarkRefsIntoAndScanClosure* cl) {
-  // Until all tasks completed:
-  // . claim an unclaimed task
-  // . compute region boundaries corresponding to task claimed
-  // . transfer dirty bits ct->mut for that region
-  // . apply rescanclosure to dirty mut bits for that region
-
-  ResourceMark rm;
-  HandleMark   hm;
-
-  OopTaskQueue* work_q = work_queue(i);
-  ModUnionClosure modUnionClosure(&(_collector->_modUnionTable));
-  // CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION! CAUTION!
-  // CAUTION: This closure has state that persists across calls to
-  // the work method dirty_range_iterate_clear() in that it has
-  // embedded in it a (subtype of) UpwardsObjectClosure. The
-  // use of that state in the embedded UpwardsObjectClosure instance
-  // assumes that the cards are always iterated (even if in parallel
-  // by several threads) in monotonically increasing order per each
-  // thread. This is true of the implementation below which picks
-  // card ranges (chunks) in monotonically increasing order globally
-  // and, a-fortiori, in monotonically increasing order per thread
-  // (the latter order being a subsequence of the former).
-  // If the work code below is ever reorganized into a more chaotic
-  // work-partitioning form than the current "sequential tasks"
-  // paradigm, the use of that persistent state will have to be
-  // revisited and modified appropriately. See also related
-  // bug 4756801; work on that bug should examine this code to make
-  // sure that the changes there do not run counter to the
-  // assumptions made here and necessary for correctness and
-  // efficiency. Note also that this code might yield inefficient
-  // behavior in the case of very large objects that span one or
-  // more work chunks. Such objects would potentially be scanned
-  // several times redundantly. Work on 4756801 should try and
-  // address that performance anomaly if at all possible. XXX
-  MemRegion  full_span  = _collector->_span;
-  CMSBitMap* bm    = &(_collector->_markBitMap);     // shared
-  MarkFromDirtyCardsClosure
-    greyRescanClosure(_collector, full_span, // entire span of interest
-                      sp, bm, work_q, cl);
-
-  SequentialSubTasksDone* pst = sp->conc_par_seq_tasks();
-  assert(pst->valid(), "Uninitialized use?");
-  uint nth_task = 0;
-  const int alignment = CardTable::card_size * BitsPerWord;
-  MemRegion span = sp->used_region();
-  HeapWord* start_addr = span.start();
-  HeapWord* end_addr = align_up(span.end(), alignment);
-  const size_t chunk_size = sp->rescan_task_size(); // in HeapWord units
-  assert(is_aligned(start_addr, alignment), "Check alignment");
-  assert(is_aligned(chunk_size, alignment), "Check alignment");
-
-  while (pst->try_claim_task(/* reference */ nth_task)) {
-    // Having claimed the nth_task, compute corresponding mem-region,
-    // which is a-fortiori aligned correctly (i.e. at a MUT boundary).
-    // The alignment restriction ensures that we do not need any
-    // synchronization with other gang-workers while setting or
-    // clearing bits in this chunk of the MUT.
-    MemRegion this_span = MemRegion(start_addr + nth_task*chunk_size,
-                                    start_addr + (nth_task+1)*chunk_size);
-    // The last chunk's end might be way beyond the end of the
-    // used region. In that case pull back appropriately.
-    if (this_span.end() > end_addr) {
-      this_span.set_end(end_addr);
-      assert(!this_span.is_empty(), "Program logic (calculation of n_tasks)");
-    }
-    // Iterate over the dirty cards covering this chunk, marking them
-    // precleaned, and setting the corresponding bits in the mod union
-    // table. Since we have been careful to partition at Card and MUT-word
-    // boundaries no synchronization is needed between parallel threads.
-    _collector->_ct->dirty_card_iterate(this_span,
-                                        &modUnionClosure);
-
-    // Having transferred these marks into the modUnionTable,
-    // rescan the marked objects on the dirty cards in the modUnionTable.
-    // Even if this is a synchronous collection, the initial marking
-    // may have been done during an asynchronous collection so there
-    // may be dirty bits in the mod-union table.
-    _collector->_modUnionTable.dirty_range_iterate_clear(
-                  this_span, &greyRescanClosure);
-    _collector->_modUnionTable.verifyNoOneBitsInRange(
-                                 this_span.start(),
-                                 this_span.end());
-  }
-  pst->all_tasks_completed();  // declare that I am done
-}
-
-// . see if we can share work_queues with ParNew? XXX
-void
-CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl) {
-  OopTaskQueue* work_q = work_queue(i);
-  NOT_PRODUCT(int num_steals = 0;)
-  oop obj_to_scan;
-  CMSBitMap* bm = &(_collector->_markBitMap);
-
-  while (true) {
-    // Completely finish any left over work from (an) earlier round(s)
-    cl->trim_queue(0);
-    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
-                                         (size_t)ParGCDesiredObjsFromOverflowList);
-    // Now check if there's any work in the overflow list
-    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
-    // only affects the number of attempts made to get work from the
-    // overflow list and does not affect the number of workers.  Just
-    // pass ParallelGCThreads so this behavior is unchanged.
-    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
-                                                work_q,
-                                                ParallelGCThreads)) {
-      // found something in global overflow list;
-      // not yet ready to go stealing work from others.
-      // We'd like to assert(work_q->size() != 0, ...)
-      // because we just took work from the overflow list,
-      // but of course we can't since all of that could have
-      // been already stolen from us.
-      // "He giveth and He taketh away."
-      continue;
-    }
-    // Verify that we have no work before we resort to stealing
-    assert(work_q->size() == 0, "Have work, shouldn't steal");
-    // Try to steal from other queues that have work
-    if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
-      NOT_PRODUCT(num_steals++;)
-      assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
-      assert(bm->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
-      // Do scanning work
-      obj_to_scan->oop_iterate(cl);
-      // Loop around, finish this work, and try to steal some more
-    } else if (terminator()->offer_termination()) {
-      break;  // nirvana from the infinite cycle
-    }
-  }
-  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
-  assert(work_q->size() == 0 && _collector->overflow_list_is_empty(),
-         "Else our work is not yet done");
-}
-
-// Record object boundaries in _eden_chunk_array by sampling the eden
-// top in the slow-path eden object allocation code path, if
-// CMSEdenChunksRecordAlways is true. If CMSEdenChunksRecordAlways is
-// false, we rely instead on the asynchronous sampling in sample_eden(),
-// which is active during part of the preclean phase.
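-// The committed samples form a strictly increasing address sequence,
-// spaced at least CMSSamplingGrain heap words apart; they later become
-// the task boundaries used by do_young_space_rescan().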
-void CMSCollector::sample_eden_chunk() {
-  if (CMSEdenChunksRecordAlways && _eden_chunk_array != NULL) {
-    if (_eden_chunk_lock->try_lock()) {
-      // Record a sample. This is the critical section. The contents
-      // of the _eden_chunk_array have to be non-decreasing in
-      // address order.
-      _eden_chunk_array[_eden_chunk_index] = *_top_addr;
-      assert(_eden_chunk_array[_eden_chunk_index] <= *_end_addr,
-             "Unexpected state of Eden");
-      if (_eden_chunk_index == 0 ||
-          ((_eden_chunk_array[_eden_chunk_index] > _eden_chunk_array[_eden_chunk_index-1]) &&
-           (pointer_delta(_eden_chunk_array[_eden_chunk_index],
-                          _eden_chunk_array[_eden_chunk_index-1]) >= CMSSamplingGrain))) {
-        _eden_chunk_index++;  // commit sample
-      }
-      _eden_chunk_lock->unlock();
-    }
-  }
-}
-
-// Return a thread-local PLAB recording array, as appropriate.
-void* CMSCollector::get_data_recorder(int thr_num) {
-  if (_survivor_plab_array != NULL &&
-      (CMSPLABRecordAlways ||
-       (_collectorState > Marking && _collectorState < FinalMarking))) {
-    assert(thr_num < (int)ParallelGCThreads, "thr_num is out of bounds");
-    ChunkArray* ca = &_survivor_plab_array[thr_num];
-    ca->reset();   // clear it so that fresh data is recorded
-    return (void*) ca;
-  } else {
-    return NULL;
-  }
-}
-
-// Reset all the thread-local PLAB recording arrays
-void CMSCollector::reset_survivor_plab_arrays() {
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _survivor_plab_array[i].reset();
-  }
-}
-
-// Merge the per-thread plab arrays into the global survivor chunk
-// array which will provide the partitioning of the survivor space
-// for CMS initial scan and rescan.
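-//
-// The loop below is effectively a k-way merge (k = no_of_gc_threads) of
-// the sorted per-thread sample arrays. A stand-alone sketch of the same
-// idea, with hypothetical names:
-//
-//   size_t out_len = 0;
-//   for (;;) {
-//     int j = index_of_least_unconsumed_head(in, cursor, k);
-//     if (j < 0) break;                 // all input arrays exhausted
-//     out[out_len++] = in[j][cursor[j]++];
-//   }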
-void CMSCollector::merge_survivor_plab_arrays(ContiguousSpace* surv,
-                                              int no_of_gc_threads) {
-  assert(_survivor_plab_array  != NULL, "Error");
-  assert(_survivor_chunk_array != NULL, "Error");
-  assert(_collectorState == FinalMarking ||
-         (CMSParallelInitialMarkEnabled && _collectorState == InitialMarking), "Error");
-  for (int j = 0; j < no_of_gc_threads; j++) {
-    _cursor[j] = 0;
-  }
-  HeapWord* top = surv->top();
-  size_t i;
-  for (i = 0; i < _survivor_chunk_capacity; i++) {  // all sca entries
-    HeapWord* min_val = top;          // Higher than any PLAB address
-    uint      min_tid = 0;            // position of min_val this round
-    for (int j = 0; j < no_of_gc_threads; j++) {
-      ChunkArray* cur_sca = &_survivor_plab_array[j];
-      if (_cursor[j] == cur_sca->end()) {
-        continue;
-      }
-      assert(_cursor[j] < cur_sca->end(), "ctl pt invariant");
-      HeapWord* cur_val = cur_sca->nth(_cursor[j]);
-      assert(surv->used_region().contains(cur_val), "Out of bounds value");
-      if (cur_val < min_val) {
-        min_tid = j;
-        min_val = cur_val;
-      } else {
-        assert(cur_val < top, "All recorded addresses should be less");
-      }
-    }
-    // At this point min_val is the least address among the entries
-    // _survivor_plab_array[j].nth(_cursor[j]) over all threads j, and
-    // min_tid is the thread (j) that witnesses that address.
-    // We record this address in the _survivor_chunk_array[i]
-    // and increment _cursor[min_tid] prior to the next round i.
-    if (min_val == top) {
-      break;
-    }
-    _survivor_chunk_array[i] = min_val;
-    _cursor[min_tid]++;
-  }
-  // We are all done; record the size of the _survivor_chunk_array
-  _survivor_chunk_index = i; // exclusive: [0, i)
-  log_trace(gc, survivor)(" (Survivor:" SIZE_FORMAT " chunks) ", i);
-  // Verify that we used up all the recorded entries
-  #ifdef ASSERT
-    size_t total = 0;
-    for (int j = 0; j < no_of_gc_threads; j++) {
-      assert(_cursor[j] == _survivor_plab_array[j].end(), "Ctl pt invariant");
-      total += _cursor[j];
-    }
-    assert(total == _survivor_chunk_index, "Ctl Pt Invariant");
-    // Check that the merged array is in sorted order
-    if (total > 0) {
-      for (size_t i = 0; i < total - 1; i++) {
-        log_develop_trace(gc, survivor)(" (chunk" SIZE_FORMAT ":" INTPTR_FORMAT ") ",
-                                     i, p2i(_survivor_chunk_array[i]));
-        assert(_survivor_chunk_array[i] < _survivor_chunk_array[i+1],
-               "Not sorted");
-      }
-    }
-  #endif // ASSERT
-}
-
-// Set up the space's par_seq_tasks structure for work claiming
-// for parallel initial scan and rescan of young gen.
-// See ParRescanTask where this is currently used.
-void
-CMSCollector::
-initialize_sequential_subtasks_for_young_gen_rescan(int n_threads) {
-  assert(n_threads > 0, "Unexpected n_threads argument");
-
-  // Eden space
-  if (!_young_gen->eden()->is_empty()) {
-    SequentialSubTasksDone* pst = _young_gen->eden()->par_seq_tasks();
-    assert(!pst->valid(), "Clobbering existing data?");
-    // Each valid entry in [0, _eden_chunk_index) is a task boundary;
-    // the extra task covers the tail of eden above the last sample.
-    size_t n_tasks = _eden_chunk_index + 1;
-    assert(n_tasks == 1 || _eden_chunk_array != NULL, "Error");
-    // Sets the condition for completion of the subtask (how many threads
-    // need to finish in order to be done).
-    pst->set_n_threads(n_threads);
-    pst->set_n_tasks((int)n_tasks);
-  }
-
-  // Merge the survivor plab arrays into _survivor_chunk_array
-  if (_survivor_plab_array != NULL) {
-    merge_survivor_plab_arrays(_young_gen->from(), n_threads);
-  } else {
-    assert(_survivor_chunk_index == 0, "Error");
-  }
-
-  // To space
-  {
-    SequentialSubTasksDone* pst = _young_gen->to()->par_seq_tasks();
-    assert(!pst->valid(), "Clobbering existing data?");
-    // Sets the condition for completion of the subtask (how many threads
-    // need to finish in order to be done).
-    pst->set_n_threads(n_threads);
-    pst->set_n_tasks(1);
-    assert(pst->valid(), "Error");
-  }
-
-  // From space
-  {
-    SequentialSubTasksDone* pst = _young_gen->from()->par_seq_tasks();
-    assert(!pst->valid(), "Clobbering existing data?");
-    size_t n_tasks = _survivor_chunk_index + 1;
-    assert(n_tasks == 1 || _survivor_chunk_array != NULL, "Error");
-    // Sets the condition for completion of the subtask (how many threads
-    // need to finish in order to be done).
-    pst->set_n_threads(n_threads);
-    pst->set_n_tasks((int)n_tasks);
-    assert(pst->valid(), "Error");
-  }
-}
-
-// Parallel version of remark
-void CMSCollector::do_remark_parallel() {
-  CMSHeap* heap = CMSHeap::heap();
-  WorkGang* workers = heap->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  // Choose to use the number of GC workers most recently set
-  // into "active_workers".
-  uint n_workers = workers->active_workers();
-
-  CompactibleFreeListSpace* cms_space  = _cmsGen->cmsSpace();
-
-  StrongRootsScope srs(n_workers);
-
-  CMSParRemarkTask tsk(this, cms_space, n_workers, workers, task_queues(), &srs);
-
-  // We won't be iterating over the cards in the card table updating
-  // the younger_gen cards, so we shouldn't call the following else
-  // the verification code as well as subsequent younger_refs_iterate
-  // code would get confused. XXX
-  // heap->rem_set()->prepare_for_younger_refs_iterate(true); // parallel
-
-  // The young gen rescan work will not be done as part of
-  // process_roots (which currently doesn't know how to
-  // parallelize such a scan), but rather will be broken up into
-  // a set of parallel tasks (via the sampling that the [abortable]
-  // preclean phase did of eden, plus the [two] tasks of
-  // scanning the [two] survivor spaces). Further fine-grain
-  // parallelization of the scanning of the survivor spaces
-  // themselves, and of precleaning of the young gen itself
-  // is deferred to the future.
-  initialize_sequential_subtasks_for_young_gen_rescan(n_workers);
-
-  // The dirty card rescan work is broken up into a "sequence"
-  // of parallel tasks (per constituent space) that are dynamically
-  // claimed by the parallel threads.
-  cms_space->initialize_sequential_subtasks_for_rescan(n_workers);
-
-  // It turns out that even when we're using 1 thread, doing the work in a
-  // separate thread causes wide variance in run times.  We can't help this
-  // in the multi-threaded case, but we special-case n=1 here to get
-  // repeatable measurements of the 1-thread overhead of the parallel code.
-  if (n_workers > 1) {
-    // Make refs discovery MT-safe, if it isn't already: it may not
-    // necessarily be so, since it's possible that we are doing
-    // ST marking.
-    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), true);
-    workers->run_task(&tsk);
-  } else {
-    ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
-    tsk.work(0);
-  }
-
-  // restore, single-threaded for now, any preserved marks
-  // as a result of work_q overflow
-  restore_preserved_marks_if_any();
-}
-
-// Non-parallel version of remark
-void CMSCollector::do_remark_non_parallel() {
-  ResourceMark rm;
-  HandleMark   hm;
-  CMSHeap* heap = CMSHeap::heap();
-  ReferenceProcessorMTDiscoveryMutator mt(ref_processor(), false);
-
-  MarkRefsIntoAndScanClosure
-    mrias_cl(_span, ref_processor(), &_markBitMap, NULL /* not precleaning */,
-             &_markStack, this,
-             false /* should_yield */, false /* not precleaning */);
-  MarkFromDirtyCardsClosure
-    markFromDirtyCardsClosure(this, _span,
-                              NULL,  // space is set further below
-                              &_markBitMap, &_markStack, &mrias_cl);
-  {
-    GCTraceTime(Trace, gc, phases) t("Grey Object Rescan", _gc_timer_cm);
-    // Iterate over the dirty cards, setting the corresponding bits in the
-    // mod union table.
-    {
-      ModUnionClosure modUnionClosure(&_modUnionTable);
-      _ct->dirty_card_iterate(_cmsGen->used_region(),
-                              &modUnionClosure);
-    }
-    // Having transferred these marks into the modUnionTable, we just need
-    // to rescan the marked objects on the dirty cards in the modUnionTable.
-    // The initial marking may have been done during an asynchronous
-    // collection so there may be dirty bits in the mod-union table.
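-    // The two-step flow here (sketch):
-    //   card table dirty bits --dirty_card_iterate--> mod union table bits
-    //   mod union bits --dirty_range_iterate_clear--> grey-object rescan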
-    const int alignment = CardTable::card_size * BitsPerWord;
-    {
-      // ... First handle dirty cards in CMS gen
-      markFromDirtyCardsClosure.set_space(_cmsGen->cmsSpace());
-      MemRegion ur = _cmsGen->used_region();
-      HeapWord* lb = ur.start();
-      HeapWord* ub = align_up(ur.end(), alignment);
-      MemRegion cms_span(lb, ub);
-      _modUnionTable.dirty_range_iterate_clear(cms_span,
-                                               &markFromDirtyCardsClosure);
-      verify_work_stacks_empty();
-      log_trace(gc)(" (re-scanned " SIZE_FORMAT " dirty cards in cms gen) ", markFromDirtyCardsClosure.num_dirty_cards());
-    }
-  }
-  if (VerifyDuringGC &&
-      CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
-    HandleMark hm;  // Discard invalid handles created during verification
-    Universe::verify();
-  }
-  {
-    GCTraceTime(Trace, gc, phases) t("Root Rescan", _gc_timer_cm);
-
-    verify_work_stacks_empty();
-
-    heap->rem_set()->prepare_for_younger_refs_iterate(false); // Not parallel.
-    StrongRootsScope srs(1);
-
-    heap->cms_process_roots(&srs,
-                            true,  // young gen as roots
-                            GenCollectedHeap::ScanningOption(roots_scanning_options()),
-                            should_unload_classes(),
-                            &mrias_cl,
-                            NULL); // The dirty klasses will be handled below
-
-    assert(should_unload_classes()
-           || (roots_scanning_options() & GenCollectedHeap::SO_AllCodeCache),
-           "if we didn't scan the code cache, we have to be ready to drop nmethods with expired weak oops");
-  }
-
-  {
-    GCTraceTime(Trace, gc, phases) t("Visit Unhandled CLDs", _gc_timer_cm);
-
-    verify_work_stacks_empty();
-
-    // Scan all class loader data objects that might have been introduced
-    // during concurrent marking.
-    ResourceMark rm;
-    GrowableArray<ClassLoaderData*>* array = ClassLoaderDataGraph::new_clds();
-    for (int i = 0; i < array->length(); i++) {
-      Devirtualizer::do_cld(&mrias_cl, array->at(i));
-    }
-
-    // We don't need to keep track of new CLDs anymore.
-    ClassLoaderDataGraph::remember_new_clds(false);
-
-    verify_work_stacks_empty();
-  }
-
-  // We might have added oops to ClassLoaderData::_handles during the
-  // concurrent marking phase. These oops do not always point to newly allocated objects
-  // that are guaranteed to be kept alive.  Hence,
-  // we do have to revisit the _handles block during the remark phase.
-  {
-    GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
-
-    verify_work_stacks_empty();
-
-    RemarkCLDClosure remark_closure(&mrias_cl);
-    ClassLoaderDataGraph::cld_do(&remark_closure);
-
-    verify_work_stacks_empty();
-  }
-
-  verify_work_stacks_empty();
-  // Restore evacuated mark words, if any, used for overflow list links
-  restore_preserved_marks_if_any();
-
-  verify_overflow_empty();
-}
-
-////////////////////////////////////////////////////////
-// Parallel Reference Processing Task Proxy Class
-////////////////////////////////////////////////////////
-class AbstractGangTaskWOopQueues : public AbstractGangTask {
-  OopTaskQueueSet*       _queues;
-  TaskTerminator         _terminator;
- public:
-  AbstractGangTaskWOopQueues(const char* name, OopTaskQueueSet* queues, uint n_threads) :
-    AbstractGangTask(name), _queues(queues), _terminator(n_threads, _queues) {}
-  ParallelTaskTerminator* terminator() { return _terminator.terminator(); }
-  OopTaskQueueSet* queues() { return _queues; }
-};
-
-class CMSRefProcTaskProxy: public AbstractGangTaskWOopQueues {
-  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
-  CMSCollector*          _collector;
-  CMSBitMap*             _mark_bit_map;
-  const MemRegion        _span;
-  ProcessTask&           _task;
-
-public:
-  CMSRefProcTaskProxy(ProcessTask&     task,
-                      CMSCollector*    collector,
-                      const MemRegion& span,
-                      CMSBitMap*       mark_bit_map,
-                      AbstractWorkGang* workers,
-                      OopTaskQueueSet* task_queues):
-    AbstractGangTaskWOopQueues("Process referents by policy in parallel",
-      task_queues,
-      workers->active_workers()),
-    _collector(collector),
-    _mark_bit_map(mark_bit_map),
-    _span(span),
-    _task(task)
-  {
-    assert(_collector->_span.equals(_span) && !_span.is_empty(),
-           "Inconsistency in _span");
-  }
-
-  OopTaskQueueSet* task_queues() { return queues(); }
-
-  OopTaskQueue* work_queue(int i) { return task_queues()->queue(i); }
-
-  void do_work_steal(int i,
-                     CMSParDrainMarkingStackClosure* drain,
-                     CMSParKeepAliveClosure* keep_alive);
-
-  virtual void work(uint worker_id);
-};
-
-void CMSRefProcTaskProxy::work(uint worker_id) {
-  ResourceMark rm;
-  HandleMark hm;
-  assert(_collector->_span.equals(_span), "Inconsistency in _span");
-  CMSParKeepAliveClosure par_keep_alive(_collector, _span,
-                                        _mark_bit_map,
-                                        work_queue(worker_id));
-  CMSParDrainMarkingStackClosure par_drain_stack(_collector, _span,
-                                                 _mark_bit_map,
-                                                 work_queue(worker_id));
-  CMSIsAliveClosure is_alive_closure(_span, _mark_bit_map);
-  _task.work(worker_id, is_alive_closure, par_keep_alive, par_drain_stack);
-  if (_task.marks_oops_alive()) {
-    do_work_steal(worker_id, &par_drain_stack, &par_keep_alive);
-  }
-  assert(work_queue(worker_id)->size() == 0, "work_queue should be empty");
-  assert(_collector->_overflow_list == NULL, "non-empty _overflow_list");
-}
-
-CMSParKeepAliveClosure::CMSParKeepAliveClosure(CMSCollector* collector,
-  MemRegion span, CMSBitMap* bit_map, OopTaskQueue* work_queue):
-   _span(span),
-   _work_queue(work_queue),
-   _bit_map(bit_map),
-   _mark_and_push(collector, span, bit_map, work_queue),
-   _low_water_mark(MIN2((work_queue->max_elems()/4),
-                        ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads)))
-{ }
-
-// . see if we can share work_queues with ParNew? XXX
-void CMSRefProcTaskProxy::do_work_steal(int i,
-  CMSParDrainMarkingStackClosure* drain,
-  CMSParKeepAliveClosure* keep_alive) {
-  OopTaskQueue* work_q = work_queue(i);
-  NOT_PRODUCT(int num_steals = 0;)
-  oop obj_to_scan;
-
-  while (true) {
-    // Completely finish any left over work from (an) earlier round(s)
-    drain->trim_queue(0);
-    size_t num_from_overflow_list = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
-                                         (size_t)ParGCDesiredObjsFromOverflowList);
-    // Now check if there's any work in the overflow list
-    // Passing ParallelGCThreads as the third parameter, no_of_gc_threads,
-    // only affects the number of attempts made to get work from the
-    // overflow list and does not affect the number of workers.  Just
-    // pass ParallelGCThreads so this behavior is unchanged.
-    if (_collector->par_take_from_overflow_list(num_from_overflow_list,
-                                                work_q,
-                                                ParallelGCThreads)) {
-      // Found something in global overflow list;
-      // not yet ready to go stealing work from others.
-      // We'd like to assert(work_q->size() != 0, ...)
-      // because we just took work from the overflow list,
-      // but of course we can't, since all of that might have
-      // been already stolen from us.
-      continue;
-    }
-    // Verify that we have no work before we resort to stealing
-    assert(work_q->size() == 0, "Have work, shouldn't steal");
-    // Try to steal from other queues that have work
-    if (task_queues()->steal(i, /* reference */ obj_to_scan)) {
-      NOT_PRODUCT(num_steals++;)
-      assert(oopDesc::is_oop(obj_to_scan), "Oops, not an oop!");
-      assert(_mark_bit_map->isMarked((HeapWord*)obj_to_scan), "Stole an unmarked oop?");
-      // Do scanning work
-      obj_to_scan->oop_iterate(keep_alive);
-      // Loop around, finish this work, and try to steal some more
-    } else if (terminator()->offer_termination()) {
-      break;  // nirvana from the infinite cycle
-    }
-  }
-  log_develop_trace(gc, task)("\t(%d: stole %d oops)", i, num_steals);
-}
-
-void CMSRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) {
-  CMSHeap* heap = CMSHeap::heap();
-  WorkGang* workers = heap->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  assert(workers->active_workers() == ergo_workers,
-         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
-         ergo_workers, workers->active_workers());
-  CMSRefProcTaskProxy rp_task(task, &_collector,
-                              _collector.ref_processor_span(),
-                              _collector.markBitMap(),
-                              workers, _collector.task_queues());
-  workers->run_task(&rp_task, workers->active_workers());
-}
-
-void CMSCollector::refProcessingWork() {
-  ResourceMark rm;
-  HandleMark   hm;
-
-  ReferenceProcessor* rp = ref_processor();
-  assert(_span_based_discoverer.span().equals(_span), "Spans should be equal");
-  assert(!rp->enqueuing_is_done(), "Enqueuing should not be complete");
-  // Process weak references.
-  rp->setup_policy(false);
-  verify_work_stacks_empty();
-
-  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
-  {
-    GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
-
-    // Setup keep_alive and complete closures.
-    CMSKeepAliveClosure cmsKeepAliveClosure(this, _span, &_markBitMap,
-                                            &_markStack, false /* !preclean */);
-    CMSDrainMarkingStackClosure cmsDrainMarkingStackClosure(this,
-                                  _span, &_markBitMap, &_markStack,
-                                  &cmsKeepAliveClosure, false /* !preclean */);
-
-    ReferenceProcessorStats stats;
-    if (rp->processing_is_mt()) {
-      // Set the degree of MT here.  If the discovery is done MT, there
-      // may have been a different number of threads doing the discovery
-      // and a different number of discovered lists may have Ref objects.
-      // That is OK as long as the Reference lists are balanced (see
-      // balance_all_queues() and balance_queues()).
-      CMSHeap* heap = CMSHeap::heap();
-      uint active_workers = ParallelGCThreads;
-      WorkGang* workers = heap->workers();
-      if (workers != NULL) {
-        active_workers = workers->active_workers();
-        // The expectation is that active_workers will have already
-        // been set to a reasonable value.  If it has not been set,
-        // investigate.
-        assert(active_workers > 0, "Should have been set during scavenge");
-      }
-      rp->set_active_mt_degree(active_workers);
-      CMSRefProcTaskExecutor task_executor(*this);
-      stats = rp->process_discovered_references(&_is_alive_closure,
-                                        &cmsKeepAliveClosure,
-                                        &cmsDrainMarkingStackClosure,
-                                        &task_executor,
-                                        &pt);
-    } else {
-      stats = rp->process_discovered_references(&_is_alive_closure,
-                                        &cmsKeepAliveClosure,
-                                        &cmsDrainMarkingStackClosure,
-                                        NULL,
-                                        &pt);
-    }
-    _gc_tracer_cm->report_gc_reference_stats(stats);
-    pt.print_all_references();
-  }
-
-  // This is the point where the entire marking should have completed.
-  verify_work_stacks_empty();
-
-  {
-    GCTraceTime(Debug, gc, phases) t("Weak Processing", _gc_timer_cm);
-    WeakProcessor::weak_oops_do(&_is_alive_closure, &do_nothing_cl);
-  }
-
-  if (should_unload_classes()) {
-    {
-      GCTraceTime(Debug, gc, phases) t("Class Unloading", _gc_timer_cm);
-
-      // Unload classes and purge the SystemDictionary.
-      bool purged_class = SystemDictionary::do_unloading(_gc_timer_cm);
-
-      // Unload nmethods.
-      CodeCache::do_unloading(&_is_alive_closure, purged_class);
-
-      // Prune dead klasses from subklass/sibling/implementor lists.
-      Klass::clean_weak_klass_links(purged_class);
-
-      // Clean JVMCI metadata handles.
-      JVMCI_ONLY(JVMCI::do_unloading(purged_class));
-    }
-  }
-
-  // Restore any preserved marks as a result of mark stack or
-  // work queue overflow
-  restore_preserved_marks_if_any();  // done single-threaded for now
-
-  rp->set_enqueuing_is_done(true);
-  rp->verify_no_references_recorded();
-}
-
-#ifndef PRODUCT
-void CMSCollector::check_correct_thread_executing() {
-  Thread* t = Thread::current();
-  // Only the VM thread or the CMS thread should be here.
-  assert(t->is_ConcurrentGC_thread() || t->is_VM_thread(),
-         "Unexpected thread type");
-  // If this is the VM thread, the foreground collector
-  // should not be waiting.  Note that _foregroundGCIsActive is
-  // true while the foreground collector is waiting.
-  if (_foregroundGCShouldWait) {
-    // We cannot be the VM thread
-    assert(t->is_ConcurrentGC_thread(),
-           "Should be CMS thread");
-  } else {
-    // We can be the CMS thread only if we are in a stop-world
-    // phase of CMS collection.
-    if (t->is_ConcurrentGC_thread()) {
-      assert(_collectorState == InitialMarking ||
-             _collectorState == FinalMarking,
-             "Should be a stop-world phase");
-      // The CMS thread should be holding the CMS_token.
-      assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-             "Potential interference with concurrently "
-             "executing VM thread");
-    }
-  }
-}
-#endif
-
-void CMSCollector::sweep() {
-  assert(_collectorState == Sweeping, "just checking");
-  check_correct_thread_executing();
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-  increment_sweep_count();
-  TraceCMSMemoryManagerStats tms(_collectorState, CMSHeap::heap()->gc_cause());
-
-  _inter_sweep_timer.stop();
-  _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
-
-  assert(!_intra_sweep_timer.is_active(), "Should not be active");
-  _intra_sweep_timer.reset();
-  _intra_sweep_timer.start();
-  {
-    GCTraceCPUTime tcpu;
-    CMSPhaseAccounting pa(this, "Concurrent Sweep");
-    // First sweep the old gen
-    {
-      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock(),
-                               bitMapLock());
-      sweepWork(_cmsGen);
-    }
-
-    // Update Universe::_heap_*_at_gc figures.
-    // We need all the free list locks to make the abstract state
-    // transition from Sweeping to Resizing. See the detailed note
-    // further below.
-    {
-      CMSTokenSyncWithLocks ts(true, _cmsGen->freelistLock());
-
-      // Update heap occupancy information which is used as
-      // input to soft ref clearing policy at the next gc.
-      Universe::update_heap_info_at_gc();
-
-      // recalculate CMS used space after CMS collection
-      _cmsGen->cmsSpace()->recalculate_used_stable();
-
-      _collectorState = Resizing;
-    }
-  }
-  verify_work_stacks_empty();
-  verify_overflow_empty();
-
-  if (should_unload_classes()) {
-    // Delay purge to the beginning of the next safepoint.  Metaspace::contains
-    // requires that the virtual spaces are stable and not deleted.
-    ClassLoaderDataGraph::set_should_purge(true);
-  }
-
-  _intra_sweep_timer.stop();
-  _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
-
-  _inter_sweep_timer.reset();
-  _inter_sweep_timer.start();
-
-  // We need to use a monotonically non-decreasing time in ms,
-  // or we will see time-warp warnings; os::javaTimeMillis()
-  // does not guarantee monotonicity.
-  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-  update_time_of_last_gc(now);
-
-  // NOTE on abstract state transitions:
-  // Mutators allocate-live and/or mark the mod-union table dirty
-  // based on the state of the collection.  The former is done in
-  // the interval [Marking, Sweeping] and the latter in the interval
-  // [Marking, Sweeping).  Thus the transitions into the Marking state
-  // and out of the Sweeping state must be synchronously visible
-  // globally to the mutators.
-  // The transition into the Marking state happens with the world
-  // stopped so the mutators will globally see it.  Sweeping is
-  // done asynchronously by the background collector so the transition
-  // from the Sweeping state to the Resizing state must be done
-  // under the freelistLock (as is the check for whether to
-  // allocate-live and whether to dirty the mod-union table).
-  assert(_collectorState == Resizing, "Change of collector state to"
-    " Resizing must be done under the freelistLocks (plural)");
-
-  // Now that sweeping has been completed, we clear
-  // the incremental_collection_failed flag,
-  // thus inviting a younger gen collection to promote into
-  // this generation. If such a promotion may still fail,
-  // the flag will be set again when a young collection is
-  // attempted.
-  CMSHeap* heap = CMSHeap::heap();
-  heap->clear_incremental_collection_failed();  // Worth retrying as fresh space may have been freed up
-  heap->update_full_collections_completed(_collection_count_start);
-}
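A side note on the time source used above: sweep() derives its millisecond timestamp from os::javaTimeNanos() precisely because a monotonic source is required when timestamps are compared across events. A minimal portable sketch of the distinction, with std::chrono standing in for the os:: time sources (illustrative only, not HotSpot code):

    #include <chrono>
    #include <cstdio>

    // steady_clock (like os::javaTimeNanos()) never goes backwards;
    // system_clock (like os::javaTimeMillis()) can warp when the wall
    // clock is adjusted, which is what triggers time-warp warnings.
    int main() {
      using namespace std::chrono;
      auto mono_ms = duration_cast<milliseconds>(
          steady_clock::now().time_since_epoch()).count();
      auto wall_ms = duration_cast<milliseconds>(
          system_clock::now().time_since_epoch()).count();
      printf("monotonic ms: %lld, wall-clock ms: %lld\n",
             (long long)mono_ms, (long long)wall_ms);
      return 0;
    }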
-
-// FIX ME!!! Looks like this belongs in CFLSpace, with
-// CMSGen merely delegating to it.
-void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
-  double nearLargestPercent = FLSLargestBlockCoalesceProximity;
-  HeapWord*  minAddr        = _cmsSpace->bottom();
-  HeapWord*  largestAddr    =
-    (HeapWord*) _cmsSpace->dictionary()->find_largest_dict();
-  if (largestAddr == NULL) {
-    // The dictionary appears to be empty.  In this case
-    // try to coalesce at the end of the heap.
-    largestAddr = _cmsSpace->end();
-  }
-  size_t largestOffset     = pointer_delta(largestAddr, minAddr);
-  size_t nearLargestOffset =
-    (size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
-  log_debug(gc, freelist)("CMS: Large Block: " PTR_FORMAT "; Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
-                          p2i(largestAddr), p2i(_cmsSpace->nearLargestChunk()), p2i(minAddr + nearLargestOffset));
-  _cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
-}
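The proximity computation above is plain pointer arithmetic: everything at or beyond a fixed fraction of the distance to the largest free block is treated as "near" it. A word-indexed toy sketch; the 0.98 factor and 2-word minimum chunk are illustrative stand-ins for FLSLargestBlockCoalesceProximity and MinChunkSize, not JVM defaults:

    #include <cstddef>
    #include <cstdio>

    // Standalone model of setNearLargestChunk()/isNearLargestChunk(),
    // with word indices in place of HeapWord* values.
    static size_t near_largest_threshold(size_t min_addr, size_t largest_addr,
                                         double proximity, size_t min_chunk) {
      size_t largest_offset = largest_addr - min_addr;   // pointer_delta
      size_t near_offset =
          (size_t)((double)largest_offset * proximity) - min_chunk;
      return min_addr + near_offset;
    }

    int main() {
      size_t threshold = near_largest_threshold(0, 100000, 0.98, 2);
      const size_t samples[] = {50000, 97990, 99000};
      for (size_t addr : samples) {
        // Mirrors isNearLargestChunk(): blocks at/above the threshold
        // are coalesced more aggressively by the sweeper.
        printf("addr %zu near largest: %s\n", addr,
               addr >= threshold ? "yes" : "no");
      }
      return 0;
    }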
-
-bool ConcurrentMarkSweepGeneration::isNearLargestChunk(HeapWord* addr) {
-  return addr >= _cmsSpace->nearLargestChunk();
-}
-
-FreeChunk* ConcurrentMarkSweepGeneration::find_chunk_at_end() {
-  return _cmsSpace->find_chunk_at_end();
-}
-
-void ConcurrentMarkSweepGeneration::update_gc_stats(Generation* current_generation,
-                                                    bool full) {
-  // If the young generation has been collected, gather any statistics
-  // that are of interest at this point.
-  bool current_is_young = CMSHeap::heap()->is_young_gen(current_generation);
-  if (!full && current_is_young) {
-    // Gather statistics on the young generation collection.
-    collector()->stats().record_gc0_end(used());
-  }
-  _cmsSpace->recalculate_used_stable();
-}
-
-void CMSCollector::sweepWork(ConcurrentMarkSweepGeneration* old_gen) {
-  // We iterate over the space(s) underlying this generation,
-  // checking the mark bit map to see if the bits corresponding
-  // to specific blocks are marked or not. Blocks that are
-  // marked are live and are not swept up. All remaining blocks
-  // are swept up, with on-the-fly coalescing of contiguous
-  // free and/or garbage blocks.
-  // We need to ensure that the sweeper synchronizes with allocators
-  // and stop-the-world collectors. In particular, the following
-  // locks are used:
-  // . CMS token: if this is held, a stop the world collection cannot occur
-  // . freelistLock: if this is held no allocation can occur from this
-  //                 generation by another thread
-  // . bitMapLock: if this is held, no other thread can access or update
-  //               the mark bit map
-
-  // Note that we need to hold the freelistLock if we use
-  // block iterate below; else the iterator might go awry if
-  // a mutator (or promotion) causes block contents to change
-  // (for instance if the allocator divvies up a block).
-  // If we hold the free list lock, for all practical purposes
-  // young generation GC's can't occur (they'll usually need to
-  // promote), so we might as well prevent all young generation
-  // GC's while we do a sweeping step. For the same reason, we might
-  // as well take the bit map lock for the entire duration.
-
-  // check that we hold the requisite locks
-  assert(have_cms_token(), "Should hold cms token");
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), "Should possess CMS token to sweep");
-  assert_lock_strong(old_gen->freelistLock());
-  assert_lock_strong(bitMapLock());
-
-  assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
-  assert(_intra_sweep_timer.is_active(),  "Was switched on  in an outer context");
-  old_gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
-                                          _inter_sweep_estimate.padded_average(),
-                                          _intra_sweep_estimate.padded_average());
-  old_gen->setNearLargestChunk();
-
-  {
-    SweepClosure sweepClosure(this, old_gen, &_markBitMap, CMSYield);
-    old_gen->cmsSpace()->blk_iterate_careful(&sweepClosure);
-    // We need to free-up/coalesce garbage/blocks from a
-    // co-terminal free run. This is done in the SweepClosure
-    // destructor; so, do not remove this scope, else the
-    // end-of-sweep-census below will be off by a little bit.
-  }
-  old_gen->cmsSpace()->sweep_completed();
-  old_gen->cmsSpace()->endSweepFLCensus(sweep_count());
-  if (should_unload_classes()) {                // unloaded classes this cycle,
-    _concurrent_cycles_since_last_unload = 0;   // ... reset count
-  } else {                                      // did not unload classes,
-    _concurrent_cycles_since_last_unload++;     // ... increment count
-  }
-}
-
-// Reset CMS data structures (for now just the marking bit map)
-// preparatory for the next cycle.
-void CMSCollector::reset_concurrent() {
-  CMSTokenSyncWithLocks ts(true, bitMapLock());
-
-  // If the state is not "Resetting", the foreground thread
-  // has already done a collection and the resetting.
-  if (_collectorState != Resetting) {
-    assert(_collectorState == Idling, "The state should only change"
-      " because the foreground collector has finished the collection");
-    return;
-  }
-
-  {
-    // Clear the mark bitmap (no grey objects to start with)
-    // for the next cycle.
-    GCTraceCPUTime tcpu;
-    CMSPhaseAccounting cmspa(this, "Concurrent Reset");
-
-    HeapWord* curAddr = _markBitMap.startWord();
-    while (curAddr < _markBitMap.endWord()) {
-      size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
-      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
-      _markBitMap.clear_large_range(chunk);
-      if (ConcurrentMarkSweepThread::should_yield() &&
-          !foregroundGCIsActive() &&
-          CMSYield) {
-        assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-               "CMS thread should hold CMS token");
-        assert_lock_strong(bitMapLock());
-        bitMapLock()->unlock();
-        ConcurrentMarkSweepThread::desynchronize(true);
-        stopTimer();
-        incrementYields();
-
-        // See the comment in coordinator_yield()
-        for (unsigned i = 0; i < CMSYieldSleepCount &&
-                         ConcurrentMarkSweepThread::should_yield() &&
-                         !CMSCollector::foregroundGCIsActive(); ++i) {
-          os::naked_short_sleep(1);
-        }
-
-        ConcurrentMarkSweepThread::synchronize(true);
-        bitMapLock()->lock_without_safepoint_check();
-        startTimer();
-      }
-      curAddr = chunk.end();
-    }
-    // A successful mostly concurrent collection has been done.
-    // Because only the full (i.e., concurrent mode failure) collections
-    // are being measured for gc overhead limits, clean the "near" flag
-    // and count.
-    size_policy()->reset_gc_overhead_limit_count();
-    _collectorState = Idling;
-  }
-
-  register_gc_end();
-}
-
-// Same as above but for STW paths
-void CMSCollector::reset_stw() {
-  // already have the lock
-  assert(_collectorState == Resetting, "just checking");
-  assert_lock_strong(bitMapLock());
-  GCIdMark gc_id_mark(_cmsThread->gc_id());
-  _markBitMap.clear_all();
-  _collectorState = Idling;
-  register_gc_end();
-}
-
-void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
-  GCTraceCPUTime tcpu;
-  TraceCollectorStats tcs_cgc(cgc_counters());
-
-  switch (op) {
-    case CMS_op_checkpointRootsInitial: {
-      GCTraceTime(Info, gc) t("Pause Initial Mark", NULL, GCCause::_no_gc, true);
-      SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
-      checkpointRootsInitial();
-      break;
-    }
-    case CMS_op_checkpointRootsFinal: {
-      GCTraceTime(Info, gc) t("Pause Remark", NULL, GCCause::_no_gc, true);
-      SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
-      checkpointRootsFinal();
-      break;
-    }
-    default:
-      fatal("No such CMS_op");
-  }
-}
-
-#ifndef PRODUCT
-size_t const CMSCollector::skip_header_HeapWords() {
-  return FreeChunk::header_size();
-}
-
-// Try and collect here conditions that should hold when
-// CMS thread is exiting. The idea is that the foreground GC
-// thread should not be blocked if it wants to terminate
-// the CMS thread and yet continue to run the VM for a while
-// after that.
-void CMSCollector::verify_ok_to_terminate() const {
-  assert(Thread::current()->is_ConcurrentGC_thread(),
-         "should be called by CMS thread");
-  assert(!_foregroundGCShouldWait, "should be false");
-  // We could check here that all the various low-level locks
-  // are not held by the CMS thread, but that is overkill; see
-  // also CMSThread::verify_ok_to_terminate() where the CGC_lock
-  // is checked.
-}
-#endif
-
-size_t CMSCollector::block_size_using_printezis_bits(HeapWord* addr) const {
-  assert(_markBitMap.isMarked(addr) && _markBitMap.isMarked(addr + 1),
-         "missing Printezis mark?");
-  HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
-  size_t size = pointer_delta(nextOneAddr + 1, addr);
-  assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-         "alignment problem");
-  assert(size >= 3, "Necessary for Printezis marks to work");
-  return size;
-}
-
-// A variant of the above (block_size_using_printezis_bits()) except
-// that we return 0 if the P-bits are not yet set.
-size_t CMSCollector::block_size_if_printezis_bits(HeapWord* addr) const {
-  if (_markBitMap.isMarked(addr + 1)) {
-    assert(_markBitMap.isMarked(addr), "P-bit can be set only for marked objects");
-    HeapWord* nextOneAddr = _markBitMap.getNextMarkedWordAddress(addr + 2);
-    size_t size = pointer_delta(nextOneAddr + 1, addr);
-    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-           "alignment problem");
-    assert(size >= 3, "Necessary for Printezis marks to work");
-    return size;
-  }
-  return 0;
-}
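Both size functions rely on the Printezis convention: an allocated but not yet initialized object of n words at addr carries mark bits at addr, addr+1 and addr+n-1, so its size can be recovered before the klass pointer is installed (hence the size >= 3 requirement). A toy bit map that models just this encoding, not HotSpot code:

    #include <cassert>
    #include <cstdio>
    #include <vector>

    // One-bit-per-word mark bitmap, word-indexed.
    struct ToyBitMap {
      std::vector<bool> bits;
      explicit ToyBitMap(size_t words) : bits(words, false) {}
      void mark(size_t i) { bits[i] = true; }
      bool is_marked(size_t i) const { return bits[i]; }
      size_t next_marked(size_t from) const {   // getNextMarkedWordAddress
        while (from < bits.size() && !bits[from]) ++from;
        return from;
      }
    };

    static void printezis_mark(ToyBitMap& bm, size_t addr, size_t size) {
      assert(size >= 3 && "Necessary for Printezis marks to work");
      bm.mark(addr); bm.mark(addr + 1); bm.mark(addr + size - 1);
    }

    // Mirrors block_size_using_printezis_bits(): the size is the distance
    // from addr to the next marked word at/after addr+2, inclusive.
    static size_t block_size_from_pbits(const ToyBitMap& bm, size_t addr) {
      assert(bm.is_marked(addr) && bm.is_marked(addr + 1));
      size_t next_one = bm.next_marked(addr + 2);
      return next_one + 1 - addr;
    }

    int main() {
      ToyBitMap bm(64);
      printezis_mark(bm, 10, 7);
      printf("recovered size = %zu words\n",
             block_size_from_pbits(bm, 10));   // prints 7
      return 0;
    }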
-
-HeapWord* CMSCollector::next_card_start_after_block(HeapWord* addr) const {
-  size_t sz = 0;
-  oop p = (oop)addr;
-  if (p->klass_or_null_acquire() != NULL) {
-    sz = CompactibleFreeListSpace::adjustObjectSize(p->size());
-  } else {
-    sz = block_size_using_printezis_bits(addr);
-  }
-  assert(sz > 0, "size must be nonzero");
-  HeapWord* next_block = addr + sz;
-  HeapWord* next_card  = align_up(next_block, CardTable::card_size);
-  assert(align_down((uintptr_t)addr,      CardTable::card_size) <
-         align_down((uintptr_t)next_card, CardTable::card_size),
-         "must be different cards");
-  return next_card;
-}
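next_card_start_after_block() is pure power-of-two alignment arithmetic. A standalone sketch with local align_up/align_down helpers; the 512-byte card size is an assumption for illustration:

    #include <cstdint>
    #include <cstdio>

    // Equivalents of HotSpot's align_down/align_up for power-of-two alignments.
    static uintptr_t align_down(uintptr_t p, uintptr_t alignment) {
      return p & ~(alignment - 1);
    }
    static uintptr_t align_up(uintptr_t p, uintptr_t alignment) {
      return align_down(p + alignment - 1, alignment);
    }

    int main() {
      const uintptr_t card_size = 512;        // assumed CardTable::card_size
      uintptr_t block_start = 0x1028;         // a block starting mid-card
      uintptr_t next_block  = block_start + 300;
      uintptr_t next_card   = align_up(next_block, card_size);
      printf("block [%#lx, %#lx) -> next card at %#lx\n",
             (unsigned long)block_start, (unsigned long)next_block,
             (unsigned long)next_card);
      // The "must be different cards" assert above checks exactly this:
      printf("different cards: %s\n",
             align_down(block_start, card_size) <
                 align_down(next_card, card_size) ? "yes" : "no");
      return 0;
    }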
-
-
-// CMS Bit Map Wrapper /////////////////////////////////////////
-
-// Construct a CMS bit map infrastructure, but don't create the
-// bit vector itself. That is done by a separate call CMSBitMap::allocate()
-// further below.
-CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
-  _shifter(shifter),
-  _bm(),
-  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
-                                    Monitor::_safepoint_check_never) : NULL)
-{
-  _bmStartWord = 0;
-  _bmWordSize  = 0;
-}
-
-bool CMSBitMap::allocate(MemRegion mr) {
-  _bmStartWord = mr.start();
-  _bmWordSize  = mr.word_size();
-  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
-                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
-  if (!brs.is_reserved()) {
-    log_warning(gc)("CMS bit map allocation failure");
-    return false;
-  }
-  // For now we'll just commit all of the bit map up front.
-  // Later on we'll try to be more parsimonious with swap.
-  if (!_virtual_space.initialize(brs, brs.size())) {
-    log_warning(gc)("CMS bit map backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == brs.size(),
-         "didn't reserve backing store for all of CMS bit map?");
-  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
-         _bmWordSize, "inconsistency in bit map sizing");
-  _bm = BitMapView((BitMap::bm_word_t*)_virtual_space.low(), _bmWordSize >> _shifter);
-
-  // bm.clear(); // can we rely on getting zero'd memory? verify below
-  assert(isAllClear(),
-         "Expected zero'd memory from ReservedSpace constructor");
-  assert(_bm.size() == heapWordDiffToOffsetDiff(sizeInWords()),
-         "consistency check");
-  return true;
-}
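The assertions in allocate() encode a simple sizing relationship: one bit covers (1 << _shifter) heap words, so the backing store needs (word_size >> (_shifter + LogBitsPerByte)) bytes, plus the one byte of rounding slop the reservation adds. A worked example; shifter 0 and the 2^30-word span are illustrative choices:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const int log_bits_per_byte = 3;               // 8-bit bytes
      const int shifter = 0;                         // 1 bit per heap word
      const size_t bm_word_size = (size_t)1 << 30;   // a 2^30-word span
      size_t bits  = bm_word_size >> shifter;
      size_t bytes = (bm_word_size >> (shifter + log_bits_per_byte)) + 1;
      printf("%zu words -> %zu bits -> %zu bytes reserved\n",
             bm_word_size, bits, bytes);
      return 0;
    }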
-
-void CMSBitMap::dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl) {
-  HeapWord *next_addr, *end_addr, *last_addr;
-  assert_locked();
-  assert(covers(mr), "out-of-range error");
-  // XXX assert that start and end are appropriately aligned
-  for (next_addr = mr.start(), end_addr = mr.end();
-       next_addr < end_addr; next_addr = last_addr) {
-    MemRegion dirty_region = getAndClearMarkedRegion(next_addr, end_addr);
-    last_addr = dirty_region.end();
-    if (!dirty_region.is_empty()) {
-      cl->do_MemRegion(dirty_region);
-    } else {
-      assert(last_addr == end_addr, "program logic");
-      return;
-    }
-  }
-}
-
-void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
-  _bm.print_on_error(st, prefix);
-}
-
-#ifndef PRODUCT
-void CMSBitMap::assert_locked() const {
-  CMSLockVerifier::assert_locked(lock());
-}
-
-bool CMSBitMap::covers(MemRegion mr) const {
-  // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
-  assert((size_t)_bm.size() == (_bmWordSize >> _shifter),
-         "size inconsistency");
-  return (mr.start() >= _bmStartWord) &&
-         (mr.end()   <= endWord());
-}
-
-bool CMSBitMap::covers(HeapWord* start, size_t size) const {
-  return (start >= _bmStartWord && (start + size) <= endWord());
-}
-
-void CMSBitMap::verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) {
-  // verify that there are no 1 bits in the interval [left, right)
-  FalseBitMapClosure falseBitMapClosure;
-  iterate(&falseBitMapClosure, left, right);
-}
-
-void CMSBitMap::region_invariant(MemRegion mr)
-{
-  assert_locked();
-  // mr = mr.intersection(MemRegion(_bmStartWord, _bmWordSize));
-  assert(!mr.is_empty(), "unexpected empty region");
-  assert(covers(mr), "mr should be covered by bit map");
-  // convert address range into offset range
-  size_t start_ofs = heapWordToOffset(mr.start());
-  // Make sure that end() is appropriately aligned
-  assert(mr.end() == align_up(mr.end(), (1 << (_shifter+LogHeapWordSize))),
-         "Misaligned mr.end()");
-  size_t end_ofs   = heapWordToOffset(mr.end());
-  assert(end_ofs > start_ofs, "Should mark at least one bit");
-}
-
-#endif
-
-bool CMSMarkStack::allocate(size_t size) {
-  // allocate a stack of the requisite depth
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
-                   size * sizeof(oop)));
-  if (!rs.is_reserved()) {
-    log_warning(gc)("CMSMarkStack allocation failure");
-    return false;
-  }
-  if (!_virtual_space.initialize(rs, rs.size())) {
-    log_warning(gc)("CMSMarkStack backing store failure");
-    return false;
-  }
-  assert(_virtual_space.committed_size() == rs.size(),
-         "didn't reserve backing store for all of CMS stack?");
-  _base = (oop*)(_virtual_space.low());
-  _index = 0;
-  _capacity = size;
-  NOT_PRODUCT(_max_depth = 0);
-  return true;
-}
-
-// XXX FIX ME !!! In the MT case we come in here holding a
-// leaf lock. For printing we need to take a further lock
-// which has lower rank. We need to recalibrate the two
-// lock-ranks involved in order to be able to print the
-// messages below. (Or defer the printing to the caller.
-// For now we take the expedient path of just disabling the
-// messages for the problematic case.)
-void CMSMarkStack::expand() {
-  assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted");
-  if (_capacity == MarkStackSizeMax) {
-    if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled) {
-      // We print a warning message only once per CMS cycle.
-      log_debug(gc)(" (benign) Hit CMSMarkStack max size limit");
-    }
-    return;
-  }
-  // Double capacity if possible
-  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
-  // Do not give up existing stack until we have managed to
-  // get the double capacity that we desired.
-  ReservedSpace rs(ReservedSpace::allocation_align_size_up(
-                   new_capacity * sizeof(oop)));
-  if (rs.is_reserved()) {
-    // Release the backing store associated with old stack
-    _virtual_space.release();
-    // Reinitialize virtual space for new stack
-    if (!_virtual_space.initialize(rs, rs.size())) {
-      fatal("Not enough swap for expanded marking stack");
-    }
-    _base = (oop*)(_virtual_space.low());
-    _index = 0;
-    _capacity = new_capacity;
-  } else if (_failed_double++ == 0 && !CMSConcurrentMTEnabled) {
-    // Failed to double capacity; continue with the old stack.
-    // We print a detail message only once per CMS cycle.
-    log_debug(gc)(" (benign) Failed to expand marking stack from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                        _capacity / K, new_capacity / K);
-  }
-}
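The growth policy in expand() is doubling clamped at MarkStackSizeMax; once the clamp is hit, further calls only log the benign limit message. A compact sketch of just that policy (the capacities are illustrative, not JVM defaults):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t mark_stack_size_max = 4u * 1024 * 1024;  // stand-in value
      size_t capacity = 32 * 1024;
      while (capacity < mark_stack_size_max) {
        // Double capacity if possible, as in CMSMarkStack::expand().
        capacity = std::min(capacity * 2, mark_stack_size_max);
        printf("expanded to %zuK entries\n", capacity / 1024);
      }
      // At this point expand() would just count _hit_limit and return.
      return 0;
    }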
-
-
-// Closures
-// XXX: there seems to be a lot of code duplication here;
-// should refactor and consolidate common code.
-
-// This closure is used to mark refs into the CMS generation in
-// the CMS bit map. Called at the first checkpoint. This closure
-// assumes that we do not need to re-mark dirty cards; if the CMS
-// generation on which this is used is not an oldest
-// generation then this will lose younger_gen cards!
-
-MarkRefsIntoClosure::MarkRefsIntoClosure(
-  MemRegion span, CMSBitMap* bitMap):
-    _span(span),
-    _bitMap(bitMap)
-{
-  assert(ref_discoverer() == NULL, "deliberately left NULL");
-  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
-}
-
-void MarkRefsIntoClosure::do_oop(oop obj) {
-  // if obj points into _span, then mark the corresponding bit in _bitMap
-  assert(oopDesc::is_oop(obj), "expected an oop");
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr)) {
-    // this should be made more efficient
-    _bitMap->mark(addr);
-  }
-}
-
-ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
-  MemRegion span, CMSBitMap* bitMap):
-    _span(span),
-    _bitMap(bitMap)
-{
-  assert(ref_discoverer() == NULL, "deliberately left NULL");
-  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
-}
-
-void ParMarkRefsIntoClosure::do_oop(oop obj) {
-  // if obj points into _span, then mark the corresponding bit in _bitMap
-  assert(oopDesc::is_oop(obj), "expected an oop");
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr)) {
-    // this should be made more efficient
-    _bitMap->par_mark(addr);
-  }
-}
-
-// A variant of the above, used for CMS marking verification.
-MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
-  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
-    _span(span),
-    _verification_bm(verification_bm),
-    _cms_bm(cms_bm)
-{
-  assert(ref_discoverer() == NULL, "deliberately left NULL");
-  assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
-}
-
-void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
-  // if obj points into _span, then mark the corresponding bit in _verification_bm
-  assert(oopDesc::is_oop(obj), "expected an oop");
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr)) {
-    _verification_bm->mark(addr);
-    if (!_cms_bm->isMarked(addr)) {
-      Log(gc, verify) log;
-      ResourceMark rm;
-      LogStream ls(log.error());
-      oop(addr)->print_on(&ls);
-      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
-      fatal("... aborting");
-    }
-  }
-}
-
-//////////////////////////////////////////////////
-// MarkRefsIntoAndScanClosure
-//////////////////////////////////////////////////
-
-MarkRefsIntoAndScanClosure::MarkRefsIntoAndScanClosure(MemRegion span,
-                                                       ReferenceDiscoverer* rd,
-                                                       CMSBitMap* bit_map,
-                                                       CMSBitMap* mod_union_table,
-                                                       CMSMarkStack*  mark_stack,
-                                                       CMSCollector* collector,
-                                                       bool should_yield,
-                                                       bool concurrent_precleaning):
-  _span(span),
-  _bit_map(bit_map),
-  _mark_stack(mark_stack),
-  _pushAndMarkClosure(collector, span, rd, bit_map, mod_union_table,
-                      mark_stack, concurrent_precleaning),
-  _collector(collector),
-  _freelistLock(NULL),
-  _yield(should_yield),
-  _concurrent_precleaning(concurrent_precleaning)
-{
-  // FIXME: Should initialize in base class constructor.
-  assert(rd != NULL, "ref_discoverer shouldn't be NULL");
-  set_ref_discoverer_internal(rd);
-}
-
-// This closure is used to mark refs into the CMS generation at the
-// second (final) checkpoint, and to scan and transitively follow
-// the unmarked oops. It is also used during the concurrent precleaning
-// phase while scanning objects on dirty cards in the CMS generation.
-// The marks are made in the marking bit map and the marking stack is
-// used for keeping the (newly) grey objects during the scan.
-// The parallel version (ParMarkRefsIntoAndScanClosure) appears further below.
-void MarkRefsIntoAndScanClosure::do_oop(oop obj) {
-  if (obj != NULL) {
-    assert(oopDesc::is_oop(obj), "expected an oop");
-    HeapWord* addr = (HeapWord*)obj;
-    assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
-    assert(_collector->overflow_list_is_empty(),
-           "overflow list should be empty");
-    if (_span.contains(addr) &&
-        !_bit_map->isMarked(addr)) {
-      // mark bit map (object is now grey)
-      _bit_map->mark(addr);
-      // push on marking stack (stack should be empty), and drain the
-      // stack by applying this closure to the oops in the oops popped
-      // from the stack (i.e. blacken the grey objects)
-      bool res = _mark_stack->push(obj);
-      assert(res, "Should have space to push on empty stack");
-      do {
-        oop new_oop = _mark_stack->pop();
-        assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
-        assert(_bit_map->isMarked((HeapWord*)new_oop),
-               "only grey objects on this stack");
-        // iterate over the oops in this oop, marking and pushing
-        // the ones in CMS heap (i.e. in _span).
-        new_oop->oop_iterate(&_pushAndMarkClosure);
-        // check if it's time to yield
-        do_yield_check();
-      } while (!_mark_stack->isEmpty() ||
-               (!_concurrent_precleaning && take_from_overflow_list()));
-        // if marking stack is empty, and we are not doing this
-        // during precleaning, then check the overflow list
-    }
-    assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
-    assert(_collector->overflow_list_is_empty(),
-           "overflow list was drained above");
-
-    assert(_collector->no_preserved_marks(),
-           "All preserved marks should have been restored above");
-  }
-}
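The loop above is the classic tri-color discipline: marking turns an object grey, and the stack is drained eagerly, blackening each popped object by scanning its references, before do_oop() returns. A toy object graph stands in for oop_iterate() in this sketch (not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct Obj { bool marked = false; std::vector<Obj*> refs; };

    static void mark_and_drain(Obj* root, std::vector<Obj*>& stack) {
      if (root == nullptr || root->marked) return;
      root->marked = true;                   // now grey
      stack.push_back(root);
      while (!stack.empty()) {               // eager drainage
        Obj* o = stack.back(); stack.pop_back();
        for (Obj* r : o->refs) {             // ~ oop_iterate(&_pushAndMarkClosure)
          if (r != nullptr && !r->marked) { r->marked = true; stack.push_back(r); }
        }
      }                                      // post-condition: stack empty
    }

    int main() {
      Obj a, b, c;
      a.refs = {&b}; b.refs = {&c, &a};      // a cycle is fine: marks are checked
      std::vector<Obj*> stack;
      mark_and_drain(&a, stack);
      printf("marked: a=%d b=%d c=%d\n", a.marked, b.marked, c.marked);
      return 0;
    }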
-
-void MarkRefsIntoAndScanClosure::do_yield_work() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_freelistLock);
-  assert_lock_strong(_bit_map->lock());
-  // relinquish the freelist lock and the bit map lock
-  _bit_map->lock()->unlock();
-  _freelistLock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  _collector->stopTimer();
-  _collector->incrementYields();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0;
-       i < CMSYieldSleepCount &&
-       ConcurrentMarkSweepThread::should_yield() &&
-       !CMSCollector::foregroundGCIsActive();
-       ++i) {
-    os::naked_short_sleep(1);
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _freelistLock->lock_without_safepoint_check();
-  _bit_map->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-///////////////////////////////////////////////////////////
-// ParMarkRefsIntoAndScanClosure: a parallel version of
-//                                MarkRefsIntoAndScanClosure
-///////////////////////////////////////////////////////////
-ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
-  CMSCollector* collector, MemRegion span, ReferenceDiscoverer* rd,
-  CMSBitMap* bit_map, OopTaskQueue* work_queue):
-  _span(span),
-  _bit_map(bit_map),
-  _work_queue(work_queue),
-  _low_water_mark(MIN2((work_queue->max_elems()/4),
-                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
-  _parPushAndMarkClosure(collector, span, rd, bit_map, work_queue)
-{
-  // FIXME: Should initialize in base class constructor.
-  assert(rd != NULL, "ref_discoverer shouldn't be NULL");
-  set_ref_discoverer_internal(rd);
-}
-
-// This closure is used to mark refs into the CMS generation at the
-// second (final) checkpoint, and to scan and transitively follow
-// the unmarked oops. The marks are made in the marking bit map and
-// the work_queue is used for keeping the (newly) grey objects during
-// the scan phase whence they are also available for stealing by parallel
-// threads. Since the marking bit map is shared, updates are
-// synchronized (via CAS).
-void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
-  if (obj != NULL) {
-    // Ignore mark word because this could be an already marked oop
-    // that may be chained at the end of the overflow list.
-    assert(oopDesc::is_oop(obj, true), "expected an oop");
-    HeapWord* addr = (HeapWord*)obj;
-    if (_span.contains(addr) &&
-        !_bit_map->isMarked(addr)) {
-      // mark bit map (object will become grey):
-      // It is possible for several threads to be
-      // trying to "claim" this object concurrently;
-      // the unique thread that succeeds in marking the
-      // object first will do the subsequent push on
-      // to the work queue (or overflow list).
-      if (_bit_map->par_mark(addr)) {
-        // push on work_queue (which may not be empty), and trim the
-        // queue to an appropriate length by applying this closure to
-        // the oops in the oops popped from the stack (i.e. blacken the
-        // grey objects)
-        bool res = _work_queue->push(obj);
-        assert(res, "Low water mark should be less than capacity?");
-        trim_queue(_low_water_mark);
-      } // Else, another thread claimed the object
-    }
-  }
-}
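The claiming described above, where the unique thread whose par_mark() flips the bit does the subsequent push, is a compare-and-swap race. A minimal sketch with one std::atomic flag standing in for a shared mark bit:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static std::atomic<bool> mark_bit{false};
    static std::atomic<int> pushes{0};

    static void try_claim() {
      bool expected = false;
      // Analogue of _bit_map->par_mark(addr): only one CAS can succeed.
      if (mark_bit.compare_exchange_strong(expected, true)) {
        pushes.fetch_add(1);    // the unique winner does the push
      }                         // else: another thread claimed the object
    }

    int main() {
      std::vector<std::thread> workers;
      for (int i = 0; i < 8; i++) workers.emplace_back(try_claim);
      for (auto& t : workers) t.join();
      printf("pushed %d time(s)\n", pushes.load());   // always 1
      return 0;
    }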
-
-// This closure is used to rescan the marked objects on the dirty cards
-// in the mod union table and the card table proper.
-size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
-  oop p, MemRegion mr) {
-
-  size_t size = 0;
-  HeapWord* addr = (HeapWord*)p;
-  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
-  assert(_span.contains(addr), "we are scanning the CMS generation");
-  // check if it's time to yield
-  if (do_yield_check()) {
-    // We yielded for some foreground stop-world work,
-    // and we have been asked to abort this ongoing preclean cycle.
-    return 0;
-  }
-  if (_bitMap->isMarked(addr)) {
-    // it's marked; is it potentially uninitialized?
-    if (p->klass_or_null_acquire() != NULL) {
-        // an initialized object; ignore mark word in verification below
-        // since we are running concurrent with mutators
-        assert(oopDesc::is_oop(p, true), "should be an oop");
-        if (p->is_objArray()) {
-          // objArrays are precisely marked; restrict scanning
-          // to dirty cards only.
-          size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate_size(_scanningClosure, mr));
-        } else {
-          // A non-array may have been imprecisely marked; we need
-          // to scan object in its entirety.
-          size = CompactibleFreeListSpace::adjustObjectSize(
-                   p->oop_iterate_size(_scanningClosure));
-        }
-      #ifdef ASSERT
-        size_t direct_size =
-          CompactibleFreeListSpace::adjustObjectSize(p->size());
-        assert(size == direct_size, "Inconsistency in size");
-        assert(size >= 3, "Necessary for Printezis marks to work");
-        HeapWord* start_pbit = addr + 1;
-        HeapWord* end_pbit = addr + size - 1;
-        assert(_bitMap->isMarked(start_pbit) == _bitMap->isMarked(end_pbit),
-               "inconsistent Printezis mark");
-        // Verify inner mark bits (between Printezis bits) are clear,
-        // but don't repeat if there are multiple dirty regions for
-        // the same object, to avoid potential O(N^2) performance.
-        if (addr != _last_scanned_object) {
-          _bitMap->verifyNoOneBitsInRange(start_pbit + 1, end_pbit);
-          _last_scanned_object = addr;
-        }
-      #endif // ASSERT
-    } else {
-      // An uninitialized object.
-      assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
-      HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
-      size = pointer_delta(nextOneAddr + 1, addr);
-      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-             "alignment problem");
-      // Note that pre-cleaning needn't redirty the card. oopDesc::set_klass()
-      // will dirty the card when the klass pointer is installed in the
-      // object (signaling the completion of initialization).
-    }
-  } else {
-    // Either a not yet marked object or an uninitialized object
-    if (p->klass_or_null_acquire() == NULL) {
-      // An uninitialized object, skip to the next card, since
-      // we may not be able to read its P-bits yet.
-      assert(size == 0, "Initial value");
-    } else {
-      // An object not (yet) reached by marking: we merely need to
-      // compute its size so as to go look at the next block.
-      assert(oopDesc::is_oop(p, true), "should be an oop");
-      size = CompactibleFreeListSpace::adjustObjectSize(p->size());
-    }
-  }
-  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
-  return size;
-}
-
-void ScanMarkedObjectsAgainCarefullyClosure::do_yield_work() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_freelistLock);
-  assert_lock_strong(_bitMap->lock());
-  // relinquish the freelist lock and the bit map lock
-  _bitMap->lock()->unlock();
-  _freelistLock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  _collector->stopTimer();
-  _collector->incrementYields();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-                   ConcurrentMarkSweepThread::should_yield() &&
-                   !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::naked_short_sleep(1);
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _freelistLock->lock_without_safepoint_check();
-  _bitMap->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-
-//////////////////////////////////////////////////////////////////
-// SurvivorSpacePrecleanClosure
-//////////////////////////////////////////////////////////////////
-// This (single-threaded) closure is used to preclean the oops in
-// the survivor spaces.
-size_t SurvivorSpacePrecleanClosure::do_object_careful(oop p) {
-
-  HeapWord* addr = (HeapWord*)p;
-  DEBUG_ONLY(_collector->verify_work_stacks_empty();)
-  assert(!_span.contains(addr), "we are scanning the survivor spaces");
-  assert(p->klass_or_null() != NULL, "object should be initialized");
-  // an initialized object; ignore mark word in verification below
-  // since we are running concurrent with mutators
-  assert(oopDesc::is_oop(p, true), "should be an oop");
-  // Note that we do not yield while we iterate over
-  // the interior oops of p, pushing the relevant ones
-  // on our marking stack.
-  size_t size = p->oop_iterate_size(_scanning_closure);
-  do_yield_check();
-  // Observe that below, we do not abandon the preclean
-  // phase as soon as we should; rather we empty the
-  // marking stack before returning. This is to satisfy
-  // some existing assertions. In general, it may be a
-  // good idea to abort immediately and complete the marking
-  // from the grey objects at a later time.
-  while (!_mark_stack->isEmpty()) {
-    oop new_oop = _mark_stack->pop();
-    assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
-    assert(_bit_map->isMarked((HeapWord*)new_oop),
-           "only grey objects on this stack");
-    // iterate over the oops in this oop, marking and pushing
-    // the ones in CMS heap (i.e. in _span).
-    new_oop->oop_iterate(_scanning_closure);
-    // check if it's time to yield
-    do_yield_check();
-  }
-  unsigned int after_count =
-    CMSHeap::heap()->total_collections();
-  bool abort = (_before_count != after_count) ||
-               _collector->should_abort_preclean();
-  return abort ? 0 : size;
-}
-
-void SurvivorSpacePrecleanClosure::do_yield_work() {
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_bit_map->lock());
-  // Relinquish the bit map lock
-  _bit_map->lock()->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  _collector->stopTimer();
-  _collector->incrementYields();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::naked_short_sleep(1);
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _bit_map->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-// This closure is used to rescan the marked objects on the dirty cards
-// in the mod union table and the card table proper. In the parallel
-// case, although the bitMap is shared, we do a single read so the
-// isMarked() query is "safe".
-bool ScanMarkedObjectsAgainClosure::do_object_bm(oop p, MemRegion mr) {
-  // Ignore mark word because we are running concurrent with mutators
-  assert(oopDesc::is_oop_or_null(p, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(p));
-  HeapWord* addr = (HeapWord*)p;
-  assert(_span.contains(addr), "we are scanning the CMS generation");
-  bool is_obj_array = false;
-  #ifdef ASSERT
-    if (!_parallel) {
-      assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
-      assert(_collector->overflow_list_is_empty(),
-             "overflow list should be empty");
-    }
-  #endif // ASSERT
-  if (_bit_map->isMarked(addr)) {
-    // Obj arrays are precisely marked, non-arrays are not;
-    // so we scan objArrays precisely and non-arrays in their
-    // entirety.
-    if (p->is_objArray()) {
-      is_obj_array = true;
-      if (_parallel) {
-        p->oop_iterate(_par_scan_closure, mr);
-      } else {
-        p->oop_iterate(_scan_closure, mr);
-      }
-    } else {
-      if (_parallel) {
-        p->oop_iterate(_par_scan_closure);
-      } else {
-        p->oop_iterate(_scan_closure);
-      }
-    }
-  }
-  #ifdef ASSERT
-    if (!_parallel) {
-      assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
-      assert(_collector->overflow_list_is_empty(),
-             "overflow list should be empty");
-    }
-  #endif // ASSERT
-  return is_obj_array;
-}
-
-MarkFromRootsClosure::MarkFromRootsClosure(CMSCollector* collector,
-                        MemRegion span,
-                        CMSBitMap* bitMap, CMSMarkStack*  markStack,
-                        bool should_yield, bool verifying):
-  _collector(collector),
-  _span(span),
-  _bitMap(bitMap),
-  _mut(&collector->_modUnionTable),
-  _markStack(markStack),
-  _yield(should_yield),
-  _skipBits(0)
-{
-  assert(_markStack->isEmpty(), "stack should be empty");
-  _finger = _bitMap->startWord();
-  _threshold = _finger;
-  assert(_collector->_restart_addr == NULL, "Sanity check");
-  assert(_span.contains(_finger), "Out of bounds _finger?");
-  DEBUG_ONLY(_verifying = verifying;)
-}
-
-void MarkFromRootsClosure::reset(HeapWord* addr) {
-  assert(_markStack->isEmpty(), "would cause duplicates on stack");
-  assert(_span.contains(addr), "Out of bounds _finger?");
-  _finger = addr;
-  _threshold = align_up(_finger, CardTable::card_size);
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-bool MarkFromRootsClosure::do_bit(size_t offset) {
-  if (_skipBits > 0) {
-    _skipBits--;
-    return true;
-  }
-  // convert offset into a HeapWord*
-  HeapWord* addr = _bitMap->startWord() + offset;
-  assert(addr >= _bitMap->startWord() && addr < _bitMap->endWord(),
-         "address out of range");
-  assert(_bitMap->isMarked(addr), "tautology");
-  if (_bitMap->isMarked(addr+1)) {
-    // this is an allocated but not yet initialized object
-    assert(_skipBits == 0, "tautology");
-    _skipBits = 2;  // skip next two marked bits ("Printezis-marks")
-    oop p = oop(addr);
-    if (p->klass_or_null_acquire() == NULL) {
-      DEBUG_ONLY(if (!_verifying) {)
-        // We re-dirty the cards on which this object lies and increase
-        // the _threshold so that we'll come back to scan this object
-        // during the preclean or remark phase. (CMSCleanOnEnter)
-        if (CMSCleanOnEnter) {
-          size_t sz = _collector->block_size_using_printezis_bits(addr);
-          HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
-          MemRegion redirty_range = MemRegion(addr, end_card_addr);
-          assert(!redirty_range.is_empty(), "Arithmetical tautology");
-          // Bump _threshold to end_card_addr; note that
-          // _threshold cannot possibly exceed end_card_addr, anyhow.
-          // This prevents future clearing of the card as the scan proceeds
-          // to the right.
-          assert(_threshold <= end_card_addr,
-                 "Because we are just scanning into this object");
-          if (_threshold < end_card_addr) {
-            _threshold = end_card_addr;
-          }
-          if (p->klass_or_null_acquire() != NULL) {
-            // Redirty the range of cards...
-            _mut->mark_range(redirty_range);
-          } // ...else the setting of klass will dirty the card anyway.
-        }
-      DEBUG_ONLY(})
-      return true;
-    }
-  }
-  scanOopsInOop(addr);
-  return true;
-}
-
-// We take a break if we've been at this for a while,
-// so as to avoid monopolizing the locks involved.
-void MarkFromRootsClosure::do_yield_work() {
-  // First give up the locks, then yield, then re-lock
-  // We should probably use a constructor/destructor idiom to
-  // do this unlock/lock or modify the MutexUnlocker class to
-  // serve our purpose. XXX
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  assert_lock_strong(_bitMap->lock());
-  _bitMap->lock()->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  _collector->stopTimer();
-  _collector->incrementYields();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::naked_short_sleep(1);
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _bitMap->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
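The XXX above asks for a constructor/destructor idiom for the unlock-yield-relock dance that this and the other do_yield_work() variants repeat. A sketch of such a scoped helper, with std::mutex standing in for HotSpot's Mutex and none of the CMS-token or timer handling the real helper would need:

    #include <cstdio>
    #include <mutex>

    class ScopedYieldUnlock {
      std::mutex& _m;
     public:
      explicit ScopedYieldUnlock(std::mutex& m) : _m(m) { _m.unlock(); }
      ~ScopedYieldUnlock() { _m.lock(); }    // re-taken when the scope exits
    };

    static void yield_point(std::mutex& bitmap_lock) {
      ScopedYieldUnlock unlocked(bitmap_lock);   // drops the lock on entry
      // ... desynchronize, stop timers, sleep briefly, resynchronize ...
    }                                            // destructor relocks here

    int main() {
      std::mutex bitmap_lock;
      bitmap_lock.lock();
      yield_point(bitmap_lock);   // lock is dropped and re-taken inside
      bitmap_lock.unlock();
      printf("lock cycled through a yield point\n");
      return 0;
    }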
-
-void MarkFromRootsClosure::scanOopsInOop(HeapWord* ptr) {
-  assert(_bitMap->isMarked(ptr), "expected bit to be set");
-  assert(_markStack->isEmpty(),
-         "should drain stack to limit stack usage");
-  // convert ptr to an oop preparatory to scanning
-  oop obj = oop(ptr);
-  // Ignore mark word in verification below, since we
-  // may be running concurrent with mutators.
-  assert(oopDesc::is_oop(obj, true), "should be an oop");
-  assert(_finger <= ptr, "_finger runneth ahead");
-  // advance the finger to right end of this object
-  _finger = ptr + obj->size();
-  assert(_finger > ptr, "we just incremented it above");
-  // On large heaps, it may take us some time to get through
-  // the marking phase. During
-  // this time it's possible that a lot of mutations have
-  // accumulated in the card table and the mod union table --
-  // these mutation records are redundant until we have
-  // actually traced into the corresponding card.
-  // Here, we check whether advancing the finger would make
-  // us cross into a new card, and if so clear corresponding
-  // cards in the MUT (preclean them in the card-table in the
-  // future).
-
-  DEBUG_ONLY(if (!_verifying) {)
-    // The clean-on-enter optimization is disabled by default,
-    // until we fix 6178663.
-    if (CMSCleanOnEnter && (_finger > _threshold)) {
-      // [_threshold, _finger) represents the interval
-      // of cards to be cleared in MUT (or precleaned in card table).
-      // The set of cards to be cleared is all those that overlap
-      // with the interval [_threshold, _finger); note that
-      // _threshold is always kept card-aligned but _finger isn't
-      // always card-aligned.
-      HeapWord* old_threshold = _threshold;
-      assert(is_aligned(old_threshold, CardTable::card_size),
-             "_threshold should always be card-aligned");
-      _threshold = align_up(_finger, CardTable::card_size);
-      MemRegion mr(old_threshold, _threshold);
-      assert(!mr.is_empty(), "Control point invariant");
-      assert(_span.contains(mr), "Should clear within span");
-      _mut->clear_range(mr);
-    }
-  DEBUG_ONLY(})
-  // Note: the finger doesn't advance while we drain
-  // the stack below.
-  PushOrMarkClosure pushOrMarkClosure(_collector,
-                                      _span, _bitMap, _markStack,
-                                      _finger, this);
-  bool res = _markStack->push(obj);
-  assert(res, "Empty non-zero size stack should have space for single push");
-  while (!_markStack->isEmpty()) {
-    oop new_oop = _markStack->pop();
-    // Skip verifying header mark word below because we are
-    // running concurrent with mutators.
-    assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
-    // now scan this oop's oops
-    new_oop->oop_iterate(&pushOrMarkClosure);
-    do_yield_check();
-  }
-  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
-}
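The clean-on-enter bookkeeping keeps _threshold card-aligned and, whenever the finger crosses it, clears every MUT card overlapping [_threshold, _finger) before bumping the threshold. A word-indexed sketch of that interval arithmetic; the 64-word card is an assumption for illustration:

    #include <cstddef>
    #include <cstdio>

    static size_t align_up(size_t p, size_t a) { return (p + a - 1) & ~(a - 1); }

    int main() {
      const size_t card_words = 64;
      size_t threshold = 0;                  // always kept card-aligned
      size_t finger = 0;
      const size_t object_sizes[] = {10, 40, 100, 30};
      for (size_t sz : object_sizes) {
        finger += sz;                        // advance to right end of object
        if (finger > threshold) {
          size_t new_threshold = align_up(finger, card_words);
          printf("clear cards in [%zu, %zu)\n", threshold, new_threshold);
          threshold = new_threshold;
        }
      }
      return 0;
    }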
-
-ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
-                       CMSCollector* collector, MemRegion span,
-                       CMSBitMap* bit_map,
-                       OopTaskQueue* work_queue,
-                       CMSMarkStack*  overflow_stack):
-  _collector(collector),
-  _whole_span(collector->_span),
-  _span(span),
-  _bit_map(bit_map),
-  _mut(&collector->_modUnionTable),
-  _work_queue(work_queue),
-  _overflow_stack(overflow_stack),
-  _skip_bits(0),
-  _task(task)
-{
-  assert(_work_queue->size() == 0, "work_queue should be empty");
-  _finger = span.start();
-  _threshold = _finger;     // XXX Defer clear-on-enter optimization for now
-  assert(_span.contains(_finger), "Out of bounds _finger?");
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-bool ParMarkFromRootsClosure::do_bit(size_t offset) {
-  if (_skip_bits > 0) {
-    _skip_bits--;
-    return true;
-  }
-  // convert offset into a HeapWord*
-  HeapWord* addr = _bit_map->startWord() + offset;
-  assert(addr >= _bit_map->startWord() && addr < _bit_map->endWord(),
-         "address out of range");
-  assert(_bit_map->isMarked(addr), "tautology");
-  if (_bit_map->isMarked(addr+1)) {
-    // this is an allocated object that might not yet be initialized
-    assert(_skip_bits == 0, "tautology");
-    _skip_bits = 2;  // skip next two marked bits ("Printezis-marks")
-    oop p = oop(addr);
-    if (p->klass_or_null_acquire() == NULL) {
-      // in the case of Clean-on-Enter optimization, redirty card
-      // and avoid clearing card by increasing  the threshold.
-      return true;
-    }
-  }
-  scan_oops_in_oop(addr);
-  return true;
-}
-
-void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
-  assert(_bit_map->isMarked(ptr), "expected bit to be set");
-  // Should we assert that our work queue is empty or
-  // below some drain limit?
-  assert(_work_queue->size() == 0,
-         "should drain stack to limit stack usage");
-  // convert ptr to an oop preparatory to scanning
-  oop obj = oop(ptr);
-  // Ignore mark word in verification below, since we
-  // may be running concurrent with mutators.
-  assert(oopDesc::is_oop(obj, true), "should be an oop");
-  assert(_finger <= ptr, "_finger runneth ahead");
-  // advance the finger to right end of this object
-  _finger = ptr + obj->size();
-  assert(_finger > ptr, "we just incremented it above");
-  // On large heaps, it may take us some time to get through
-  // the marking phase. During
-  // this time it's possible that a lot of mutations have
-  // accumulated in the card table and the mod union table --
-  // these mutation records are redundant until we have
-  // actually traced into the corresponding card.
-  // Here, we check whether advancing the finger would make
-  // us cross into a new card, and if so clear corresponding
-  // cards in the MUT (preclean them in the card-table in the
-  // future).
-
-  // The clean-on-enter optimization is disabled by default,
-  // until we fix 6178663.
-  if (CMSCleanOnEnter && (_finger > _threshold)) {
-    // [_threshold, _finger) represents the interval
-    // of cards to be cleared in MUT (or precleaned in card table).
-    // The set of cards to be cleared is all those that overlap
-    // with the interval [_threshold, _finger); note that
-    // _threshold is always kept card-aligned but _finger isn't
-    // always card-aligned.
-    HeapWord* old_threshold = _threshold;
-    assert(is_aligned(old_threshold, CardTable::card_size),
-           "_threshold should always be card-aligned");
-    _threshold = align_up(_finger, CardTable::card_size);
-    MemRegion mr(old_threshold, _threshold);
-    assert(!mr.is_empty(), "Control point invariant");
-    assert(_span.contains(mr), "Should clear within span"); // _whole_span ??
-    _mut->clear_range(mr);
-  }
-
-  // Note: the local finger doesn't advance while we drain
-  // the stack below, but the global finger sure can and will.
-  HeapWord* volatile* gfa = _task->global_finger_addr();
-  ParPushOrMarkClosure pushOrMarkClosure(_collector,
-                                         _span, _bit_map,
-                                         _work_queue,
-                                         _overflow_stack,
-                                         _finger,
-                                         gfa, this);
-  bool res = _work_queue->push(obj);   // overflow could occur here
-  assert(res, "Will hold once we use workqueues");
-  while (true) {
-    oop new_oop;
-    if (!_work_queue->pop_local(new_oop)) {
-      // We emptied our work_queue; check if there's stuff that can
-      // be gotten from the overflow stack.
-      if (CMSConcMarkingTask::get_work_from_overflow_stack(
-            _overflow_stack, _work_queue)) {
-        do_yield_check();
-        continue;
-      } else {  // done
-        break;
-      }
-    }
-    // Skip verifying header mark word below because we are
-    // running concurrent with mutators.
-    assert(oopDesc::is_oop(new_oop, true), "Oops! expected to pop an oop");
-    // now scan this oop's oops
-    new_oop->oop_iterate(&pushOrMarkClosure);
-    do_yield_check();
-  }
-  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
-}
-
-// Yield in response to a request from VM Thread or
-// from mutators.
-void ParMarkFromRootsClosure::do_yield_work() {
-  assert(_task != NULL, "sanity");
-  _task->yield();
-}
-
-// A variant of the above used for verifying CMS marking work.
-MarkFromRootsVerifyClosure::MarkFromRootsVerifyClosure(CMSCollector* collector,
-                        MemRegion span,
-                        CMSBitMap* verification_bm, CMSBitMap* cms_bm,
-                        CMSMarkStack*  mark_stack):
-  _collector(collector),
-  _span(span),
-  _verification_bm(verification_bm),
-  _cms_bm(cms_bm),
-  _mark_stack(mark_stack),
-  _pam_verify_closure(collector, span, verification_bm, cms_bm,
-                      mark_stack)
-{
-  assert(_mark_stack->isEmpty(), "stack should be empty");
-  _finger = _verification_bm->startWord();
-  assert(_collector->_restart_addr == NULL, "Sanity check");
-  assert(_span.contains(_finger), "Out of bounds _finger?");
-}
-
-void MarkFromRootsVerifyClosure::reset(HeapWord* addr) {
-  assert(_mark_stack->isEmpty(), "would cause duplicates on stack");
-  assert(_span.contains(addr), "Out of bounds _finger?");
-  _finger = addr;
-}
-
-// Should revisit to see if this should be restructured for
-// greater efficiency.
-bool MarkFromRootsVerifyClosure::do_bit(size_t offset) {
-  // convert offset into a HeapWord*
-  HeapWord* addr = _verification_bm->startWord() + offset;
-  assert(addr >= _verification_bm->startWord() && addr < _verification_bm->endWord(),
-         "address out of range");
-  assert(_verification_bm->isMarked(addr), "tautology");
-  assert(_cms_bm->isMarked(addr), "tautology");
-
-  assert(_mark_stack->isEmpty(),
-         "should drain stack to limit stack usage");
-  // convert addr to an oop preparatory to scanning
-  oop obj = oop(addr);
-  assert(oopDesc::is_oop(obj), "should be an oop");
-  assert(_finger <= addr, "_finger runneth ahead");
-  // advance the finger to right end of this object
-  _finger = addr + obj->size();
-  assert(_finger > addr, "we just incremented it above");
-  // Note: the finger doesn't advance while we drain
-  // the stack below.
-  bool res = _mark_stack->push(obj);
-  assert(res, "Empty non-zero size stack should have space for single push");
-  while (!_mark_stack->isEmpty()) {
-    oop new_oop = _mark_stack->pop();
-    assert(oopDesc::is_oop(new_oop), "Oops! expected to pop an oop");
-    // now scan this oop's oops
-    new_oop->oop_iterate(&_pam_verify_closure);
-  }
-  assert(_mark_stack->isEmpty(), "tautology, emphasizing post-condition");
-  return true;
-}
-
-PushAndMarkVerifyClosure::PushAndMarkVerifyClosure(
-  CMSCollector* collector, MemRegion span,
-  CMSBitMap* verification_bm, CMSBitMap* cms_bm,
-  CMSMarkStack*  mark_stack):
-  MetadataVisitingOopIterateClosure(collector->ref_processor()),
-  _collector(collector),
-  _span(span),
-  _verification_bm(verification_bm),
-  _cms_bm(cms_bm),
-  _mark_stack(mark_stack)
-{ }
-
-template <class T> void PushAndMarkVerifyClosure::do_oop_work(T *p) {
-  oop obj = RawAccess<>::oop_load(p);
-  do_oop(obj);
-}
-
-void PushAndMarkVerifyClosure::do_oop(oop* p)       { PushAndMarkVerifyClosure::do_oop_work(p); }
-void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); }
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void PushAndMarkVerifyClosure::handle_stack_overflow(HeapWord* lost) {
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_mark_stack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _mark_stack->reset();  // discard stack contents
-  _mark_stack->expand(); // expand the stack if possible
-}
-
-void PushAndMarkVerifyClosure::do_oop(oop obj) {
-  assert(oopDesc::is_oop_or_null(obj), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr) && !_verification_bm->isMarked(addr)) {
-    // Oop lies in _span and isn't yet grey or black
-    _verification_bm->mark(addr);            // now grey
-    if (!_cms_bm->isMarked(addr)) {
-      Log(gc, verify) log;
-      ResourceMark rm;
-      LogStream ls(log.error());
-      oop(addr)->print_on(&ls);
-      log.error(" (" INTPTR_FORMAT " should have been marked)", p2i(addr));
-      fatal("... aborting");
-    }
-
-    if (!_mark_stack->push(obj)) { // stack overflow
-      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _mark_stack->capacity());
-      assert(_mark_stack->isFull(), "Else push should have succeeded");
-      handle_stack_overflow(addr);
-    }
-    // anything including and to the right of _finger
-    // will be scanned as we iterate over the remainder of the
-    // bit map
-  }
-}
-
-PushOrMarkClosure::PushOrMarkClosure(CMSCollector* collector,
-                     MemRegion span,
-                     CMSBitMap* bitMap, CMSMarkStack*  markStack,
-                     HeapWord* finger, MarkFromRootsClosure* parent) :
-  MetadataVisitingOopIterateClosure(collector->ref_processor()),
-  _collector(collector),
-  _span(span),
-  _bitMap(bitMap),
-  _markStack(markStack),
-  _finger(finger),
-  _parent(parent)
-{ }
-
-ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
-                                           MemRegion span,
-                                           CMSBitMap* bit_map,
-                                           OopTaskQueue* work_queue,
-                                           CMSMarkStack*  overflow_stack,
-                                           HeapWord* finger,
-                                           HeapWord* volatile* global_finger_addr,
-                                           ParMarkFromRootsClosure* parent) :
-  MetadataVisitingOopIterateClosure(collector->ref_processor()),
-  _collector(collector),
-  _whole_span(collector->_span),
-  _span(span),
-  _bit_map(bit_map),
-  _work_queue(work_queue),
-  _overflow_stack(overflow_stack),
-  _finger(finger),
-  _global_finger_addr(global_finger_addr),
-  _parent(parent)
-{ }
-
-// Assumes thread-safe access by callers, who are
-// responsible for mutual exclusion.
-void CMSCollector::lower_restart_addr(HeapWord* low) {
-  assert(_span.contains(low), "Out of bounds addr");
-  if (_restart_addr == NULL) {
-    _restart_addr = low;
-  } else {
-    _restart_addr = MIN2(_restart_addr, low);
-  }
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_markStack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _markStack->reset();  // discard stack contents
-  _markStack->expand(); // expand the stack if possible
-}
-
-// Upon stack overflow, we discard (part of) the stack,
-// remembering the least address amongst those discarded
-// in CMSCollector's _restart_address.
-void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
-  // We need to do this under a mutex to prevent other
-  // workers from interfering with the work done below.
-  MutexLocker ml(_overflow_stack->par_lock(),
-                 Mutex::_no_safepoint_check_flag);
-  // Remember the least grey address discarded
-  HeapWord* ra = (HeapWord*)_overflow_stack->least_value(lost);
-  _collector->lower_restart_addr(ra);
-  _overflow_stack->reset();  // discard stack contents
-  _overflow_stack->expand(); // expand the stack if possible
-}
-
-void PushOrMarkClosure::do_oop(oop obj) {
-  // Ignore mark word because we are running concurrent with mutators.
-  assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr) && !_bitMap->isMarked(addr)) {
-    // Oop lies in _span and isn't yet grey or black
-    _bitMap->mark(addr);            // now grey
-    if (addr < _finger) {
-      // the bit map iteration has already either passed, or
-      // sampled, this bit in the bit map; we'll need to
-      // use the marking stack to scan this oop's oops.
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow || !_markStack->push(obj)) { // stack overflow
-        log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _markStack->capacity());
-        assert(simulate_overflow || _markStack->isFull(), "Else push should have succeeded");
-        handle_stack_overflow(addr);
-      }
-    }
-    // anything including and to the right of _finger
-    // will be scanned as we iterate over the remainder of the
-    // bit map
-    do_yield_check();
-  }
-}
-
-void ParPushOrMarkClosure::do_oop(oop obj) {
-  // Ignore mark word because we are running concurrent with mutators.
-  assert(oopDesc::is_oop_or_null(obj, true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
-  HeapWord* addr = (HeapWord*)obj;
-  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // Oop lies in _span and isn't yet grey or black
-    // We read the global_finger (volatile read) strictly after marking the oop.
-    bool res = _bit_map->par_mark(addr);    // now grey
-    volatile HeapWord** gfa = (volatile HeapWord**)_global_finger_addr;
-    // Should we push this marked oop on our stack?
-    // -- if someone else marked it, nothing to do
-    // -- if target oop is above global finger nothing to do
-    // -- if target oop is in chunk and above local finger
-    //      then nothing to do
-    // -- else push on work queue
-    if (   !res       // someone else marked it, they will deal with it
-        || (addr >= *gfa)  // will be scanned in a later task
-        || (_span.contains(addr) && addr >= _finger)) { // later in this chunk
-      return;
-    }
-    // the bit map iteration has already either passed, or
-    // sampled, this bit in the bit map; we'll need to
-    // use the marking stack to scan this oop's oops.
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (CMSMarkStackOverflowALot &&
-          _collector->simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow ||
-        !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) {
-      // stack overflow
-      log_trace(gc)("CMS marking stack overflow (benign) at " SIZE_FORMAT, _overflow_stack->capacity());
-      // We cannot assert that the overflow stack is full because
-      // it may have been emptied since.
-      assert(simulate_overflow ||
-             _work_queue->size() == _work_queue->max_elems(),
-            "Else push should have succeeded");
-      handle_stack_overflow(addr);
-    }
-    do_yield_check();
-  }
-}
-
-PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
-                                       MemRegion span,
-                                       ReferenceDiscoverer* rd,
-                                       CMSBitMap* bit_map,
-                                       CMSBitMap* mod_union_table,
-                                       CMSMarkStack*  mark_stack,
-                                       bool           concurrent_precleaning):
-  MetadataVisitingOopIterateClosure(rd),
-  _collector(collector),
-  _span(span),
-  _bit_map(bit_map),
-  _mod_union_table(mod_union_table),
-  _mark_stack(mark_stack),
-  _concurrent_precleaning(concurrent_precleaning)
-{
-  assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
-}
-
-// Grey object rescan during pre-cleaning and second checkpoint phases --
-// the non-parallel version (the parallel version appears further below.)
-void PushAndMarkClosure::do_oop(oop obj) {
-  // Ignore mark word verification. During concurrent precleaning,
-  // the object monitor may be locked; during the checkpoint
-  // phases, the object may already have been reached by a different
-  // path and may be at the end of the global overflow list (so
-  // the mark word may be NULL).
-  assert(oopDesc::is_oop_or_null(obj, true /* ignore mark word */),
-         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
-  HeapWord* addr = (HeapWord*)obj;
-  // Check if oop points into the CMS generation
-  // and is not marked
-  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // a white object ...
-    _bit_map->mark(addr);         // ... now grey
-    // push on the marking stack (grey set)
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (CMSMarkStackOverflowALot &&
-          _collector->simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow || !_mark_stack->push(obj)) {
-      if (_concurrent_precleaning) {
-         // During precleaning we can just dirty the appropriate card(s)
-         // in the mod union table, thus ensuring that the object remains
-         // in the grey set, and continue. In the case of object arrays
-         // we need to dirty all of the cards that the object spans,
-         // since the rescan of object arrays will be limited to the
-         // dirty cards.
-         // Note that no one can be interfering with us in this action
-         // of dirtying the mod union table, so no locking or atomics
-         // are required.
-         if (obj->is_objArray()) {
-           size_t sz = obj->size();
-           HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
-           MemRegion redirty_range = MemRegion(addr, end_card_addr);
-           assert(!redirty_range.is_empty(), "Arithmetical tautology");
-           _mod_union_table->mark_range(redirty_range);
-         } else {
-           _mod_union_table->mark(addr);
-         }
-         _collector->_ser_pmc_preclean_ovflw++;
-      } else {
-         // During the remark phase, we need to remember this oop
-         // in the overflow list.
-         _collector->push_on_overflow_list(obj);
-         _collector->_ser_pmc_remark_ovflw++;
-      }
-    }
-  }
-}
-
-ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
-                                             MemRegion span,
-                                             ReferenceDiscoverer* rd,
-                                             CMSBitMap* bit_map,
-                                             OopTaskQueue* work_queue):
-  MetadataVisitingOopIterateClosure(rd),
-  _collector(collector),
-  _span(span),
-  _bit_map(bit_map),
-  _work_queue(work_queue)
-{
-  assert(ref_discoverer() != NULL, "ref_discoverer shouldn't be NULL");
-}
-
-// Grey object rescan during second checkpoint phase --
-// the parallel version.
-void ParPushAndMarkClosure::do_oop(oop obj) {
-  // In the assert below, we ignore the mark word because
-  // this oop may point to an already visited object that is
-  // on the overflow stack (in which case the mark word has
-  // been hijacked for chaining into the overflow stack --
-  // if this is the last object in the overflow stack then
-  // its mark word will be NULL). Because this object may
-  // have been subsequently popped off the global overflow
-  // stack, and the mark word possibly restored to the prototypical
-  // value, by the time we get to examine this failing assert in
-  // the debugger, is_oop_or_null(false) may subsequently start
-  // to hold.
-  assert(oopDesc::is_oop_or_null(obj, true),
-         "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
-  HeapWord* addr = (HeapWord*)obj;
-  // Check if oop points into the CMS generation
-  // and is not marked
-  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
-    // a white object ...
-    // If we manage to "claim" the object, by being the
-    // first thread to mark it, then we push it on our
-    // marking stack
-    if (_bit_map->par_mark(addr)) {     // ... now grey
-      // push on work queue (grey set)
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->par_simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow || !_work_queue->push(obj)) {
-        _collector->par_push_on_overflow_list(obj);
-        _collector->_par_pmc_remark_ovflw++; //  imprecise OK: no need to CAS
-      }
-    } // Else, some other thread got there first
-  }
-}
-
-void CMSPrecleanRefsYieldClosure::do_yield_work() {
-  Mutex* bml = _collector->bitMapLock();
-  assert_lock_strong(bml);
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-
-  bml->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-
-  _collector->stopTimer();
-  _collector->incrementYields();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::naked_short_sleep(1);
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  bml->lock_without_safepoint_check();
-
-  _collector->startTimer();
-}
-
-bool CMSPrecleanRefsYieldClosure::should_return() {
-  if (ConcurrentMarkSweepThread::should_yield()) {
-    do_yield_work();
-  }
-  return _collector->foregroundGCIsActive();
-}
-
-void MarkFromDirtyCardsClosure::do_MemRegion(MemRegion mr) {
-  assert(((size_t)mr.start())%CardTable::card_size_in_words == 0,
-         "mr should be aligned to start at a card boundary");
-  // We'd like to assert:
-  // assert(mr.word_size()%CardTable::card_size_in_words == 0,
-  //        "mr should be a range of cards");
-  // However, that would be too strong in one case -- the last
-  // partition ends at _unallocated_block which, in general, can be
-  // an arbitrary boundary, not necessarily card aligned.
-  _num_dirty_cards += mr.word_size()/CardTable::card_size_in_words;
-  _space->object_iterate_mem(mr, &_scan_cl);
-}
-
-SweepClosure::SweepClosure(CMSCollector* collector,
-                           ConcurrentMarkSweepGeneration* g,
-                           CMSBitMap* bitMap, bool should_yield) :
-  _collector(collector),
-  _g(g),
-  _sp(g->cmsSpace()),
-  _limit(_sp->sweep_limit()),
-  _freelistLock(_sp->freelistLock()),
-  _bitMap(bitMap),
-  _inFreeRange(false),           // No free range at beginning of sweep
-  _freeRangeInFreeLists(false),  // No free range at beginning of sweep
-  _lastFreeRangeCoalesced(false),
-  _yield(should_yield),
-  _freeFinger(g->used_region().start())
-{
-  NOT_PRODUCT(
-    _numObjectsFreed = 0;
-    _numWordsFreed   = 0;
-    _numObjectsLive = 0;
-    _numWordsLive = 0;
-    _numObjectsAlreadyFree = 0;
-    _numWordsAlreadyFree = 0;
-    _last_fc = NULL;
-
-    _sp->initializeIndexedFreeListArrayReturnedBytes();
-    _sp->dictionary()->initialize_dict_returned_bytes();
-  )
-  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
-         "sweep _limit out of bounds");
-  log_develop_trace(gc, sweep)("====================");
-  log_develop_trace(gc, sweep)("Starting new sweep with limit " PTR_FORMAT, p2i(_limit));
-}
-
-void SweepClosure::print_on(outputStream* st) const {
-  st->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
-               p2i(_sp->bottom()), p2i(_sp->end()));
-  st->print_cr("_limit = " PTR_FORMAT, p2i(_limit));
-  st->print_cr("_freeFinger = " PTR_FORMAT, p2i(_freeFinger));
-  NOT_PRODUCT(st->print_cr("_last_fc = " PTR_FORMAT, p2i(_last_fc));)
-  st->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
-               _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
-}
-
-#ifndef PRODUCT
-// Assertion checking only:  no useful work in product mode --
-// however, if any of the flags below become product flags,
-// you may need to review this code to see if it needs to be
-// enabled in product mode.
-SweepClosure::~SweepClosure() {
-  assert_lock_strong(_freelistLock);
-  assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
-         "sweep _limit out of bounds");
-  if (inFreeRange()) {
-    Log(gc, sweep) log;
-    log.error("inFreeRange() should have been reset; dumping state of SweepClosure");
-    ResourceMark rm;
-    LogStream ls(log.error());
-    print_on(&ls);
-    ShouldNotReachHere();
-  }
-
-  if (log_is_enabled(Debug, gc, sweep)) {
-    log_debug(gc, sweep)("Collected " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
-                         _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
-    log_debug(gc, sweep)("Live " SIZE_FORMAT " objects,  " SIZE_FORMAT " bytes  Already free " SIZE_FORMAT " objects, " SIZE_FORMAT " bytes",
-                         _numObjectsLive, _numWordsLive*sizeof(HeapWord), _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
-    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) * sizeof(HeapWord);
-    log_debug(gc, sweep)("Total sweep: " SIZE_FORMAT " bytes", totalBytes);
-  }
-
-  if (log_is_enabled(Trace, gc, sweep) && CMSVerifyReturnedBytes) {
-    size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
-    size_t dict_returned_bytes = _sp->dictionary()->sum_dict_returned_bytes();
-    size_t returned_bytes = indexListReturnedBytes + dict_returned_bytes;
-    log_trace(gc, sweep)("Returned " SIZE_FORMAT " bytes   Indexed List Returned " SIZE_FORMAT " bytes        Dictionary Returned " SIZE_FORMAT " bytes",
-                         returned_bytes, indexListReturnedBytes, dict_returned_bytes);
-  }
-  log_develop_trace(gc, sweep)("end of sweep with _limit = " PTR_FORMAT, p2i(_limit));
-  log_develop_trace(gc, sweep)("================");
-}
-#endif  // PRODUCT
-
-void SweepClosure::initialize_free_range(HeapWord* freeFinger,
-    bool freeRangeInFreeLists) {
-  log_develop_trace(gc, sweep)("---- Start free range at " PTR_FORMAT " with free block (%d)",
-                               p2i(freeFinger), freeRangeInFreeLists);
-  assert(!inFreeRange(), "Trampling existing free range");
-  set_inFreeRange(true);
-  set_lastFreeRangeCoalesced(false);
-
-  set_freeFinger(freeFinger);
-  set_freeRangeInFreeLists(freeRangeInFreeLists);
-  if (CMSTestInFreeList) {
-    if (freeRangeInFreeLists) {
-      FreeChunk* fc = (FreeChunk*) freeFinger;
-      assert(fc->is_free(), "A chunk on the free list should be free.");
-      assert(fc->size() > 0, "Free range should have a size");
-      assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
-    }
-  }
-}
-
-// Note that the sweeper runs concurrently with mutators. Thus,
-// it is possible for direct allocation in this generation to happen
-// in the middle of the sweep. Note that the sweeper also coalesces
-// contiguous free blocks. Thus, unless the sweeper and the allocator
-// synchronize appropriately, freshly allocated blocks may get swept up.
-// This is accomplished by the sweeper locking the free lists while
-// it is sweeping. Thus blocks that are determined to be free are
-// indeed free. There is however one additional complication:
-// blocks that have been allocated since the final checkpoint and
-// mark, will not have been marked and so would be treated as
-// unreachable and swept up. To prevent this, the allocator marks
-// the bit map when allocating during the sweep phase. This leads,
-// however, to a further complication -- objects may have been allocated
-// but not yet initialized -- in the sense that the header isn't yet
-// installed. The sweeper cannot then determine the size of the block
-// in order to skip over it. To deal with this case, we use a technique
-// (due to Printezis) to encode such uninitialized block sizes in the
-// bit map. Since the bit map uses a bit per every HeapWord, but the
-// CMS generation has a minimum object size of 3 HeapWords, it follows
-// that "normal marks" won't be adjacent in the bit map (there will
-// always be at least two 0 bits between successive 1 bits). We make use
-// of these "unused" bits to represent uninitialized blocks -- the bit
-// corresponding to the start of the uninitialized object and the next
-// bit are both set. Finally, a 1 bit marks the end of the object that
-// started with the two consecutive 1 bits to indicate its potentially
-// uninitialized state.
-
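-// Illustrative sketch (not part of the original sources): recovering the
-// size of a Printezis-marked, possibly uninitialized block from the bit
-// map encoding described above. It assumes the two consecutive set bits
-// at the block's first two words and the single set bit at its last word:
-//
-//   static size_t printezis_block_size(CMSBitMap* bm, HeapWord* addr) {
-//     assert(bm->isMarked(addr) && bm->isMarked(addr + 1),
-//            "block is not Printezis-marked");
-//     HeapWord* last_word = bm->getNextMarkedWordAddress(addr + 2);
-//     return pointer_delta(last_word + 1, addr); // size includes last word
-//   }
-//
-// This mirrors the size computation in do_live_chunk() below.
-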
-size_t SweepClosure::do_blk_careful(HeapWord* addr) {
-  FreeChunk* fc = (FreeChunk*)addr;
-  size_t res;
-
-  // Check if we are done sweeping. Below we check "addr >= _limit" rather
-  // than "addr == _limit" because although _limit was a block boundary when
-  // we started the sweep, it may no longer be one because heap expansion
-  // may have caused us to coalesce the block ending at the address _limit
-  // with a newly expanded chunk (this happens when _limit was set to the
-  // previous _end of the space), so we may have stepped past _limit:
-  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
-  if (addr >= _limit) { // we have swept up to or past the limit: finish up
-    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
-           "sweep _limit out of bounds");
-    assert(addr < _sp->end(), "addr out of bounds");
-    // Flush any free range we might be holding as a single
-    // coalesced chunk to the appropriate free list.
-    if (inFreeRange()) {
-      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
-             "freeFinger() " PTR_FORMAT " is out of bounds", p2i(freeFinger()));
-      flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(addr, freeFinger()));
-      log_develop_trace(gc, sweep)("Sweep: last chunk: put_free_blk " PTR_FORMAT " (" SIZE_FORMAT ") [coalesced:%d]",
-                                   p2i(freeFinger()), pointer_delta(addr, freeFinger()),
-                                   lastFreeRangeCoalesced() ? 1 : 0);
-    }
-
-    // help the iterator loop finish
-    return pointer_delta(_sp->end(), addr);
-  }
-
-  assert(addr < _limit, "sweep invariant");
-  // check if we should yield
-  do_yield_check(addr);
-  if (fc->is_free()) {
-    // Chunk that is already free
-    res = fc->size();
-    do_already_free_chunk(fc);
-    debug_only(_sp->verifyFreeLists());
-    // If we flush the chunk at hand in lookahead_and_flush()
-    // and it's coalesced with a preceding chunk, then the
-    // process of "mangling" the payload of the coalesced block
-    // will cause erasure of the size information from the
-    // (erstwhile) header of all the coalesced blocks but the
-    // first, so the first disjunct in the assert will not hold
-    // in that specific case (in which case the second disjunct
-    // will hold).
-    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
-           "Otherwise the size info doesn't change at this step");
-    NOT_PRODUCT(
-      _numObjectsAlreadyFree++;
-      _numWordsAlreadyFree += res;
-    )
-    NOT_PRODUCT(_last_fc = fc;)
-  } else if (!_bitMap->isMarked(addr)) {
-    // Chunk is fresh garbage
-    res = do_garbage_chunk(fc);
-    debug_only(_sp->verifyFreeLists());
-    NOT_PRODUCT(
-      _numObjectsFreed++;
-      _numWordsFreed += res;
-    )
-  } else {
-    // Chunk that is alive.
-    res = do_live_chunk(fc);
-    debug_only(_sp->verifyFreeLists());
-    NOT_PRODUCT(
-        _numObjectsLive++;
-        _numWordsLive += res;
-    )
-  }
-  return res;
-}
-
-// For smart allocation, record the following:
-//  split deaths - a free chunk is removed from its free list because
-//      it is being split into two or more chunks.
-//  split birth - a free chunk is being added to its free list because
-//      a larger free chunk has been split and resulted in this free chunk.
-//  coal death - a free chunk is being removed from its free list because
-//      it is being coalesced into a large free chunk.
-//  coal birth - a free chunk is being added to its free list because
-//      it was created when two or more free chunks were coalesced into
-//      this free chunk.
-//
-// These statistics are used to determine the desired number of free
-// chunks of a given size.  The desired number is chosen to be relative
-// to the end of a CMS sweep.  The desired number at the end of a sweep
-// is the
-//      count-at-end-of-previous-sweep (an amount that was enough)
-//              - count-at-beginning-of-current-sweep  (the excess)
-//              + split-births  (gains in this size during interval)
-//              - split-deaths  (demands on this size during interval)
-// where the interval is from the end of one sweep to the end of the
-// next.
-//
-// When sweeping the sweeper maintains an accumulated chunk which is
-// the chunk that is made up of chunks that have been coalesced.  That
-// will be termed the left-hand chunk.  A new chunk of garbage that
-// is being considered for coalescing will be referred to as the
-// right-hand chunk.
-//
-// When making a decision on whether to coalesce a right-hand chunk with
-// the current left-hand chunk, the current count vs. the desired count
-// of the left-hand chunk is considered.  Also if the right-hand chunk
-// is near the large chunk at the end of the heap (see
-// ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
-// left-hand chunk is coalesced.
-//
-// When making a decision about whether to split a chunk, the desired count
-// vs. the current count of the candidate to be split is also considered.
-// If the candidate is underpopulated (currently fewer chunks than desired)
-// a chunk of an overpopulated (currently more chunks than desired) size may
-// be chosen.  The "hint" associated with a free list, if non-null, points
-// to a free list which may be overpopulated.
-//
-
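-// Illustrative sketch (not from the original sources) of the desired-count
-// computation the note above describes, for one chunk size over a single
-// sweep-to-sweep interval; the function and parameter names are hypothetical:
-//
-//   static ssize_t desired_count_at_sweep_end(ssize_t count_at_prev_sweep_end,
-//                                             ssize_t count_at_cur_sweep_begin,
-//                                             ssize_t split_births,
-//                                             ssize_t split_deaths) {
-//     return count_at_prev_sweep_end - count_at_cur_sweep_begin
-//            + split_births - split_deaths;
-//   }
-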
-void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
-  const size_t size = fc->size();
-  // Chunks that cannot be coalesced are not in the
-  // free lists.
-  if (CMSTestInFreeList && !fc->cantCoalesce()) {
-    assert(_sp->verify_chunk_in_free_list(fc),
-           "free chunk should be in free lists");
-  }
-  // a chunk that is already free, should not have been
-  // marked in the bit map
-  HeapWord* const addr = (HeapWord*) fc;
-  assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
-  // Verify that the bit map has no bits marked between
-  // addr and purported end of this block.
-  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-
-  // Some chunks cannot be coalesced under any circumstances.
-  // See the definition of cantCoalesce().
-  if (!fc->cantCoalesce()) {
-    // This chunk can potentially be coalesced.
-    // All the work is done in do_post_free_or_garbage_chunk().
-    do_post_free_or_garbage_chunk(fc, size);
-    // Note that if the chunk is not coalescable (the else arm
-    // below), we unconditionally flush, without needing to do
-    // a "lookahead," as we do below.
-    if (inFreeRange()) lookahead_and_flush(fc, size);
-  } else {
-    // Code path common to both original and adaptive free lists.
-
-    // can't coalesce with previous block; this should be treated
-    // as the end of a free run if any
-    if (inFreeRange()) {
-      // we kicked some butt; time to pick up the garbage
-      assert(freeFinger() < addr, "freeFinger points too high");
-      flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
-    }
-    // else, nothing to do, just continue
-  }
-}
-
-size_t SweepClosure::do_garbage_chunk(FreeChunk* fc) {
-  // This is a chunk of garbage.  It is not in any free list.
-  // Add it to a free list or let it possibly be coalesced into
-  // a larger chunk.
-  HeapWord* const addr = (HeapWord*) fc;
-  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
-
-  // Verify that the bit map has no bits marked between
-  // addr and purported end of just dead object.
-  _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
-  do_post_free_or_garbage_chunk(fc, size);
-
-  assert(_limit >= addr + size,
-         "A freshly garbage chunk can't possibly straddle over _limit");
-  if (inFreeRange()) lookahead_and_flush(fc, size);
-  return size;
-}
-
-size_t SweepClosure::do_live_chunk(FreeChunk* fc) {
-  HeapWord* addr = (HeapWord*) fc;
-  // The sweeper has just found a live object. Return any accumulated
-  // left hand chunk to the free lists.
-  if (inFreeRange()) {
-    assert(freeFinger() < addr, "freeFinger points too high");
-    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
-  }
-
-  // This object is live: we'd normally expect this to be
-  // an oop, and would like to assert the following:
-  // assert(oopDesc::is_oop(oop(addr)), "live block should be an oop");
-  // However, as we commented above, this may be an object whose
-  // header hasn't yet been initialized.
-  size_t size;
-  assert(_bitMap->isMarked(addr), "Tautology for this control point");
-  if (_bitMap->isMarked(addr + 1)) {
-    // Determine the size from the bit map, rather than trying to
-    // compute it from the object header.
-    HeapWord* nextOneAddr = _bitMap->getNextMarkedWordAddress(addr + 2);
-    size = pointer_delta(nextOneAddr + 1, addr);
-    assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
-           "alignment problem");
-
-#ifdef ASSERT
-      if (oop(addr)->klass_or_null_acquire() != NULL) {
-        // Ignore mark word because we are running concurrent with mutators
-        assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
-        assert(size ==
-               CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
-               "P-mark and computed size do not agree");
-      }
-#endif
-
-  } else {
-    // This should be an initialized object that's alive.
-    assert(oop(addr)->klass_or_null_acquire() != NULL,
-           "Should be an initialized object");
-    // Ignore mark word because we are running concurrent with mutators
-    assert(oopDesc::is_oop(oop(addr), true), "live block should be an oop");
-    // Verify that the bit map has no bits marked between
-    // addr and purported end of this block.
-    size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
-    assert(size >= 3, "Necessary for Printezis marks to work");
-    assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
-    DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
-  }
-  return size;
-}
-
-void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
-                                                 size_t chunkSize) {
-  // do_post_free_or_garbage_chunk() should only be called in the case
-  // of the adaptive free list allocator.
-  const bool fcInFreeLists = fc->is_free();
-  assert((HeapWord*)fc <= _limit, "sweep invariant");
-  if (CMSTestInFreeList && fcInFreeLists) {
-    assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
-  }
-
-  log_develop_trace(gc, sweep)("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
-
-  HeapWord* const fc_addr = (HeapWord*) fc;
-
-  bool coalesce = false;
-  const size_t left  = pointer_delta(fc_addr, freeFinger());
-  const size_t right = chunkSize;
-  switch (FLSCoalescePolicy) {
-    // numeric value forms a coalescing aggressiveness metric
-    case 0:  { // never coalesce
-      coalesce = false;
-      break;
-    }
-    case 1: { // coalesce if left & right chunks on overpopulated lists
-      coalesce = _sp->coalOverPopulated(left) &&
-                 _sp->coalOverPopulated(right);
-      break;
-    }
-    case 2: { // coalesce if left chunk on overpopulated list (default)
-      coalesce = _sp->coalOverPopulated(left);
-      break;
-    }
-    case 3: { // coalesce if left OR right chunk on overpopulated list
-      coalesce = _sp->coalOverPopulated(left) ||
-                 _sp->coalOverPopulated(right);
-      break;
-    }
-    case 4: { // always coalesce
-      coalesce = true;
-      break;
-    }
-    default:
-     ShouldNotReachHere();
-  }
-
-  // Should the current free range be coalesced?
-  // If the chunk is in a free range and either we decided to coalesce above
-  // or the chunk is near the large block at the end of the heap
-  // (isNearLargestChunk() returns true), then coalesce this chunk.
-  const bool doCoalesce = inFreeRange()
-                          && (coalesce || _g->isNearLargestChunk(fc_addr));
-  if (doCoalesce) {
-    // Coalesce the current free range on the left with the new
-    // chunk on the right.  If either is on a free list,
-    // it must be removed from the list and stashed in the closure.
-    if (freeRangeInFreeLists()) {
-      FreeChunk* const ffc = (FreeChunk*)freeFinger();
-      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
-             "Size of free range is inconsistent with chunk size.");
-      if (CMSTestInFreeList) {
-        assert(_sp->verify_chunk_in_free_list(ffc),
-               "Chunk is not in free lists");
-      }
-      _sp->coalDeath(ffc->size());
-      _sp->removeFreeChunkFromFreeLists(ffc);
-      set_freeRangeInFreeLists(false);
-    }
-    if (fcInFreeLists) {
-      _sp->coalDeath(chunkSize);
-      assert(fc->size() == chunkSize,
-        "The chunk has the wrong size or is not in the free lists");
-      _sp->removeFreeChunkFromFreeLists(fc);
-    }
-    set_lastFreeRangeCoalesced(true);
-    print_free_block_coalesced(fc);
-  } else {  // not in a free range and/or should not coalesce
-    // Return the current free range and start a new one.
-    if (inFreeRange()) {
-      // In a free range but cannot coalesce with the right hand chunk.
-      // Put the current free range into the free lists.
-      flush_cur_free_chunk(freeFinger(),
-                           pointer_delta(fc_addr, freeFinger()));
-    }
-    // Set up for new free range.  Pass along whether the right hand
-    // chunk is in the free lists.
-    initialize_free_range((HeapWord*)fc, fcInFreeLists);
-  }
-}
-
-// Lookahead flush:
-// If we are tracking a free range, and this is the last chunk that
-// we'll look at because its end crosses past _limit, we'll preemptively
-// flush it along with any free range we may be holding on to. Note that
-// this can be the case only for an already free or freshly garbage
-// chunk. If this block is an object, it can never straddle
-// over _limit. The "straddling" occurs when _limit is set at
-// the previous end of the space when this cycle started, and
-// a subsequent heap expansion caused the previously co-terminal
-// free block to be coalesced with the newly expanded portion,
-// thus rendering _limit a non-block-boundary, making it dangerous
-// for the sweeper to step over and examine.
-void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
-  assert(inFreeRange(), "Should only be called if currently in a free range.");
-  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
-  assert(_sp->used_region().contains(eob - 1),
-         "eob = " PTR_FORMAT " eob-1 = " PTR_FORMAT " _limit = " PTR_FORMAT
-         " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
-         " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
-         p2i(eob), p2i(eob-1), p2i(_limit), p2i(_sp->bottom()), p2i(_sp->end()), p2i(fc), chunk_size);
-  if (eob >= _limit) {
-    assert(eob == _limit || fc->is_free(), "Only a free chunk should allow us to cross over the limit");
-    log_develop_trace(gc, sweep)("_limit " PTR_FORMAT " reached or crossed by block "
-                                 "[" PTR_FORMAT "," PTR_FORMAT ") in space "
-                                 "[" PTR_FORMAT "," PTR_FORMAT ")",
-                                 p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
-    // Return the storage we are tracking back into the free lists.
-    log_develop_trace(gc, sweep)("Flushing ... ");
-    assert(freeFinger() < eob, "Error");
-    flush_cur_free_chunk( freeFinger(), pointer_delta(eob, freeFinger()));
-  }
-}
-
-void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
-  assert(inFreeRange(), "Should only be called if currently in a free range.");
-  assert(size > 0,
-    "A zero sized chunk cannot be added to the free lists.");
-  if (!freeRangeInFreeLists()) {
-    if (CMSTestInFreeList) {
-      FreeChunk* fc = (FreeChunk*) chunk;
-      fc->set_size(size);
-      assert(!_sp->verify_chunk_in_free_list(fc),
-             "chunk should not be in free lists yet");
-    }
-    log_develop_trace(gc, sweep)(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists", p2i(chunk), size);
-    // A new free range is going to be starting.  The current
-    // free range has not been added to the free lists yet, or
-    // was removed, so add it back.
-    // If the current free range was coalesced, then the death
-    // of the free range was recorded.  Record a birth now.
-    if (lastFreeRangeCoalesced()) {
-      _sp->coalBirth(size);
-    }
-    _sp->addChunkAndRepairOffsetTable(chunk, size,
-            lastFreeRangeCoalesced());
-  } else {
-    log_develop_trace(gc, sweep)("Already in free list: nothing to flush");
-  }
-  set_inFreeRange(false);
-  set_freeRangeInFreeLists(false);
-}
-
-// We take a break if we've been at this for a while,
-// so as to avoid monopolizing the locks involved.
-void SweepClosure::do_yield_work(HeapWord* addr) {
-  // Return current free chunk being used for coalescing (if any)
-  // to the appropriate freelist.  After yielding, the next
-  // free block encountered will start a coalescing range of
-  // free blocks.  If the next free block is adjacent to the
-  // chunk just flushed, they will need to wait for the next
-  // sweep to be coalesced.
-  if (inFreeRange()) {
-    flush_cur_free_chunk(freeFinger(), pointer_delta(addr, freeFinger()));
-  }
-
-  // First give up the locks, then yield, then re-lock.
-  // We should probably use a constructor/destructor idiom to
-  // do this unlock/lock or modify the MutexUnlocker class to
-  // serve our purpose. XXX
-  assert_lock_strong(_bitMap->lock());
-  assert_lock_strong(_freelistLock);
-  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
-         "CMS thread should hold CMS token");
-  _bitMap->lock()->unlock();
-  _freelistLock->unlock();
-  ConcurrentMarkSweepThread::desynchronize(true);
-  _collector->stopTimer();
-  _collector->incrementYields();
-
-  // See the comment in coordinator_yield()
-  for (unsigned i = 0; i < CMSYieldSleepCount &&
-                       ConcurrentMarkSweepThread::should_yield() &&
-                       !CMSCollector::foregroundGCIsActive(); ++i) {
-    os::naked_short_sleep(1);
-  }
-
-  ConcurrentMarkSweepThread::synchronize(true);
-  _freelistLock->lock_without_safepoint_check();
-  _bitMap->lock()->lock_without_safepoint_check();
-  _collector->startTimer();
-}
-
-#ifndef PRODUCT
-// This is actually very useful in a product build if it can
-// be called from the debugger.  Compile it into the product
-// as needed.
-bool debug_verify_chunk_in_free_list(FreeChunk* fc) {
-  return debug_cms_space->verify_chunk_in_free_list(fc);
-}
-#endif
-
-void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
-  log_develop_trace(gc, sweep)("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
-                               p2i(fc), fc->size());
-}
-
-// CMSIsAliveClosure
-bool CMSIsAliveClosure::do_object_b(oop obj) {
-  HeapWord* addr = (HeapWord*)obj;
-  return addr != NULL &&
-         (!_span.contains(addr) || _bit_map->isMarked(addr));
-}
-
-CMSKeepAliveClosure::CMSKeepAliveClosure( CMSCollector* collector,
-                      MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      bool cpc):
-  _collector(collector),
-  _span(span),
-  _mark_stack(mark_stack),
-  _bit_map(bit_map),
-  _concurrent_precleaning(cpc) {
-  assert(!_span.is_empty(), "Empty span could spell trouble");
-}
-
-
-// CMSKeepAliveClosure: the serial version
-void CMSKeepAliveClosure::do_oop(oop obj) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr) &&
-      !_bit_map->isMarked(addr)) {
-    _bit_map->mark(addr);
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (CMSMarkStackOverflowALot &&
-          _collector->simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow || !_mark_stack->push(obj)) {
-      if (_concurrent_precleaning) {
-        // We dirty the overflowed object and let the remark
-        // phase deal with it.
-        assert(_collector->overflow_list_is_empty(), "Error");
-        // In the case of object arrays, we need to dirty all of
-        // the cards that the object spans. No locking or atomics
-        // are needed since no one else can be mutating the mod union
-        // table.
-        if (obj->is_objArray()) {
-          size_t sz = obj->size();
-          HeapWord* end_card_addr = align_up(addr + sz, CardTable::card_size);
-          MemRegion redirty_range = MemRegion(addr, end_card_addr);
-          assert(!redirty_range.is_empty(), "Arithmetical tautology");
-          _collector->_modUnionTable.mark_range(redirty_range);
-        } else {
-          _collector->_modUnionTable.mark(addr);
-        }
-        _collector->_ser_kac_preclean_ovflw++;
-      } else {
-        _collector->push_on_overflow_list(obj);
-        _collector->_ser_kac_ovflw++;
-      }
-    }
-  }
-}
-
-// CMSParKeepAliveClosure: a parallel version of the above.
-// The work queues are private to each closure (thread),
-// but (may be) available for stealing by other threads.
-void CMSParKeepAliveClosure::do_oop(oop obj) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr) &&
-      !_bit_map->isMarked(addr)) {
-    // In general, during recursive tracing, several threads
-    // may be concurrently getting here; the first one to
-    // "tag" it, claims it.
-    if (_bit_map->par_mark(addr)) {
-      bool res = _work_queue->push(obj);
-      assert(res, "Low water mark should be much less than capacity");
-      // Do a recursive trim in the hope that this will keep
-      // stack usage lower, but leave some oops for potential stealers
-      trim_queue(_low_water_mark);
-    } // Else, another thread got there first
-  }
-}
-
-void CMSParKeepAliveClosure::trim_queue(uint max) {
-  while (_work_queue->size() > max) {
-    oop new_oop;
-    if (_work_queue->pop_local(new_oop)) {
-      assert(new_oop != NULL && oopDesc::is_oop(new_oop), "Expected an oop");
-      assert(_bit_map->isMarked((HeapWord*)new_oop),
-             "no white objects on this stack!");
-      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
-      // iterate over the oops in this oop, marking and pushing
-      // the ones in CMS heap (i.e. in _span).
-      new_oop->oop_iterate(&_mark_and_push);
-    }
-  }
-}
-
-CMSInnerParMarkAndPushClosure::CMSInnerParMarkAndPushClosure(
-                                CMSCollector* collector,
-                                MemRegion span, CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue):
-  _collector(collector),
-  _span(span),
-  _work_queue(work_queue),
-  _bit_map(bit_map) { }
-
-void CMSInnerParMarkAndPushClosure::do_oop(oop obj) {
-  HeapWord* addr = (HeapWord*)obj;
-  if (_span.contains(addr) &&
-      !_bit_map->isMarked(addr)) {
-    if (_bit_map->par_mark(addr)) {
-      bool simulate_overflow = false;
-      NOT_PRODUCT(
-        if (CMSMarkStackOverflowALot &&
-            _collector->par_simulate_overflow()) {
-          // simulate a stack overflow
-          simulate_overflow = true;
-        }
-      )
-      if (simulate_overflow || !_work_queue->push(obj)) {
-        _collector->par_push_on_overflow_list(obj);
-        _collector->_par_kac_ovflw++;
-      }
-    } // Else another thread got there already
-  }
-}
-
-//////////////////////////////////////////////////////////////////
-//  CMSExpansionCause                /////////////////////////////
-//////////////////////////////////////////////////////////////////
-const char* CMSExpansionCause::to_string(CMSExpansionCause::Cause cause) {
-  switch (cause) {
-    case _no_expansion:
-      return "No expansion";
-    case _satisfy_free_ratio:
-      return "Free ratio";
-    case _satisfy_promotion:
-      return "Satisfy promotion";
-    case _satisfy_allocation:
-      return "allocation";
-    case _allocate_par_lab:
-      return "Par LAB";
-    case _allocate_par_spooling_space:
-      return "Par Spooling Space";
-    case _adaptive_size_policy:
-      return "Ergonomics";
-    default:
-      return "unknown";
-  }
-}
-
-void CMSDrainMarkingStackClosure::do_void() {
-  // the max number to take from overflow list at a time
-  const size_t num = _mark_stack->capacity()/4;
-  assert(!_concurrent_precleaning || _collector->overflow_list_is_empty(),
-         "Overflow list should be NULL during concurrent phases");
-  while (!_mark_stack->isEmpty() ||
-         // if stack is empty, check the overflow list
-         _collector->take_from_overflow_list(num, _mark_stack)) {
-    oop obj = _mark_stack->pop();
-    HeapWord* addr = (HeapWord*)obj;
-    assert(_span.contains(addr), "Should be within span");
-    assert(_bit_map->isMarked(addr), "Should be marked");
-    assert(oopDesc::is_oop(obj), "Should be an oop");
-    obj->oop_iterate(_keep_alive);
-  }
-}
-
-void CMSParDrainMarkingStackClosure::do_void() {
-  // drain queue
-  trim_queue(0);
-}
-
-// Trim our work_queue so its length is below max at return
-void CMSParDrainMarkingStackClosure::trim_queue(uint max) {
-  while (_work_queue->size() > max) {
-    oop new_oop;
-    if (_work_queue->pop_local(new_oop)) {
-      assert(oopDesc::is_oop(new_oop), "Expected an oop");
-      assert(_bit_map->isMarked((HeapWord*)new_oop),
-             "no white objects on this stack!");
-      assert(_span.contains((HeapWord*)new_oop), "Out of bounds oop");
-      // iterate over the oops in this oop, marking and pushing
-      // the ones in CMS heap (i.e. in _span).
-      new_oop->oop_iterate(&_mark_and_push);
-    }
-  }
-}
-
-////////////////////////////////////////////////////////////////////
-// Support for Marking Stack Overflow list handling and related code
-////////////////////////////////////////////////////////////////////
-// Much of the following code is similar in shape and spirit to the
-// code used in ParNewGC. We should try to share that code
-// as much as possible in the future.
-
-#ifndef PRODUCT
-// Debugging support for CMSStackOverflowALot
-
-// It's OK to call this multi-threaded; the worst thing
-// that can happen is that we'll get a bunch of closely
-// spaced simulated overflows, but that's OK, in fact
-// probably good as it would exercise the overflow code
-// under contention.
-bool CMSCollector::simulate_overflow() {
-  if (_overflow_counter-- <= 0) { // just being defensive
-    _overflow_counter = CMSMarkStackOverflowInterval;
-    return true;
-  } else {
-    return false;
-  }
-}
-
-bool CMSCollector::par_simulate_overflow() {
-  return simulate_overflow();
-}
-#endif
-
-// Single-threaded
-bool CMSCollector::take_from_overflow_list(size_t num, CMSMarkStack* stack) {
-  assert(stack->isEmpty(), "Expected precondition");
-  assert(stack->capacity() > num, "Shouldn't bite off more than we can chew");
-  size_t i = num;
-  oop  cur = _overflow_list;
-  const markWord proto = markWord::prototype();
-  NOT_PRODUCT(ssize_t n = 0;)
-  for (oop next; i > 0 && cur != NULL; cur = next, i--) {
-    next = oop(cur->mark_raw().to_pointer());
-    cur->set_mark_raw(proto);   // until proven otherwise
-    assert(oopDesc::is_oop(cur), "Should be an oop");
-    bool res = stack->push(cur);
-    assert(res, "Bit off more than can chew?");
-    NOT_PRODUCT(n++;)
-  }
-  _overflow_list = cur;
-#ifndef PRODUCT
-  assert(_num_par_pushes >= n, "Too many pops?");
-  _num_par_pushes -=n;
-#endif
-  return !stack->isEmpty();
-}
-
-#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
-// (MT-safe) Get a prefix of at most "num" from the list.
-// The overflow list is chained through the mark word of
-// each object in the list. We fetch the entire list,
-// break off a prefix of the right size and return the
-// remainder. If other threads try to take objects from
-// the overflow list at that time, they will wait for
-// some time to see if data becomes available. If (and
-// only if) another thread places one or more object(s)
-// on the global list before we have returned the suffix
-// to the global list, we will walk down our local list
-// to find its end and append the global list to
-// our suffix before returning it. This suffix walk can
-// prove to be expensive (quadratic in the amount of traffic)
-// when there are many objects in the overflow list and
-// there is much producer-consumer contention on the list.
-// *NOTE*: The overflow list manipulation code here and
-// in ParNewGeneration:: is very similar in shape,
-// except that in the ParNew case we use the old (from/eden)
-// copy of the object to thread the list via its klass word.
-// Because of the common code, if you make any changes in
-// the code below, please check the ParNew version to see if
-// similar changes might be needed.
-// CR 6797058 has been filed to consolidate the common code.
-bool CMSCollector::par_take_from_overflow_list(size_t num,
-                                               OopTaskQueue* work_q,
-                                               int no_of_gc_threads) {
-  assert(work_q->size() == 0, "First empty local work queue");
-  assert(num < work_q->max_elems(), "Can't bite more than we can chew");
-  if (_overflow_list == NULL) {
-    return false;
-  }
-  // Grab the entire list; we'll put back a suffix
-  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
-  // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
-  // set to ParallelGCThreads.
-  size_t CMSOverflowSpinCount = (size_t) no_of_gc_threads; // was ParallelGCThreads;
-  size_t sleep_time_millis = MAX2((size_t)1, num/100);
-  // If the list is busy, we spin for a short while,
-  // sleeping between attempts to get the list.
-  for (size_t spin = 0; prefix == BUSY && spin < CMSOverflowSpinCount; spin++) {
-    os::naked_sleep(sleep_time_millis);
-    if (_overflow_list == NULL) {
-      // Nothing left to take
-      return false;
-    } else if (_overflow_list != BUSY) {
-      // Try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
-    }
-  }
-  // If the list was found to be empty, or we spun long
-  // enough, we give up and return empty-handed. If we leave
-  // the list in the BUSY state below, it must be the case that
-  // some other thread holds the overflow list and will set it
-  // to a non-BUSY state in the future.
-  if (prefix == NULL || prefix == BUSY) {
-     // Nothing to take or waited long enough
-     if (prefix == NULL) {
-       // Write back the NULL in case we overwrote it with BUSY above
-       // and it is still the same value.
-       Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
-     }
-     return false;
-  }
-  assert(prefix != NULL && prefix != BUSY, "Error");
-  size_t i = num;
-  oop cur = prefix;
-  // Walk down the first "num" objects, unless we reach the end.
-  for (; i > 1 && cur->mark_raw().to_pointer() != NULL; cur = oop(cur->mark_raw().to_pointer()), i--);
-  if (cur->mark_raw().to_pointer() == NULL) {
-    // We have "num" or fewer elements in the list, so there
-    // is nothing to return to the global list.
-    // Write back the NULL in lieu of the BUSY we wrote
-    // above, if it is still the same value.
-    if (_overflow_list == BUSY) {
-      Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
-    }
-  } else {
-    // Chop off the suffix and return it to the global list.
-    assert(cur->mark_raw().to_pointer() != (void*)BUSY, "Error");
-    oop suffix_head = oop(cur->mark_raw().to_pointer()); // suffix will be put back on global list
-    cur->set_mark_raw(markWord::from_pointer(NULL));     // break off suffix
-    // It's possible that the list is still in the empty(busy) state
-    // we left it in a short while ago; in that case we may be
-    // able to place back the suffix without incurring the cost
-    // of a walk down the list.
-    oop observed_overflow_list = _overflow_list;
-    oop cur_overflow_list = observed_overflow_list;
-    bool attached = false;
-    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
-      observed_overflow_list =
-        Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
-      if (cur_overflow_list == observed_overflow_list) {
-        attached = true;
-        break;
-      } else cur_overflow_list = observed_overflow_list;
-    }
-    if (!attached) {
-      // Too bad, someone else sneaked in (at least) an element; we'll need
-      // to do a splice. Find tail of suffix so we can prepend suffix to global
-      // list.
-      for (cur = suffix_head; cur->mark_raw().to_pointer() != NULL; cur = (oop)(cur->mark_raw().to_pointer()));
-      oop suffix_tail = cur;
-      assert(suffix_tail != NULL && suffix_tail->mark_raw().to_pointer() == NULL,
-             "Tautology");
-      observed_overflow_list = _overflow_list;
-      do {
-        cur_overflow_list = observed_overflow_list;
-        if (cur_overflow_list != BUSY) {
-          // Do the splice ...
-          suffix_tail->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
-        } else { // cur_overflow_list == BUSY
-          suffix_tail->set_mark_raw(markWord::from_pointer(NULL));
-        }
-        // ... and try to place spliced list back on overflow_list ...
-        observed_overflow_list =
-          Atomic::cmpxchg((oopDesc*)suffix_head, &_overflow_list, (oopDesc*)cur_overflow_list);
-      } while (cur_overflow_list != observed_overflow_list);
-      // ... until we have succeeded in doing so.
-    }
-  }
-
-  // Push the prefix elements on work_q
-  assert(prefix != NULL, "control point invariant");
-  const markWord proto = markWord::prototype();
-  oop next;
-  NOT_PRODUCT(ssize_t n = 0;)
-  for (cur = prefix; cur != NULL; cur = next) {
-    next = oop(cur->mark_raw().to_pointer());
-    cur->set_mark_raw(proto);   // until proven otherwise
-    assert(oopDesc::is_oop(cur), "Should be an oop");
-    bool res = work_q->push(cur);
-    assert(res, "Bit off more than we can chew?");
-    NOT_PRODUCT(n++;)
-  }
-#ifndef PRODUCT
-  assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::sub(n, &_num_par_pushes);
-#endif
-  return true;
-}
-
-// Single-threaded
-void CMSCollector::push_on_overflow_list(oop p) {
-  NOT_PRODUCT(_num_par_pushes++;)
-  assert(oopDesc::is_oop(p), "Not an oop");
-  preserve_mark_if_necessary(p);
-  p->set_mark_raw(markWord::from_pointer(_overflow_list));
-  _overflow_list = p;
-}
-
-// Multi-threaded; use CAS to prepend to overflow list
-void CMSCollector::par_push_on_overflow_list(oop p) {
-  NOT_PRODUCT(Atomic::inc(&_num_par_pushes);)
-  assert(oopDesc::is_oop(p), "Not an oop");
-  par_preserve_mark_if_necessary(p);
-  oop observed_overflow_list = _overflow_list;
-  oop cur_overflow_list;
-  do {
-    cur_overflow_list = observed_overflow_list;
-    if (cur_overflow_list != BUSY) {
-      p->set_mark_raw(markWord::from_pointer((void*)cur_overflow_list));
-    } else {
-      p->set_mark_raw(markWord::from_pointer(NULL));
-    }
-    observed_overflow_list =
-      Atomic::cmpxchg((oopDesc*)p, &_overflow_list, (oopDesc*)cur_overflow_list);
-  } while (cur_overflow_list != observed_overflow_list);
-}
-#undef BUSY
-
-// Single threaded
-// General Note on GrowableArray: pushes may silently fail
-// because we are (temporarily) out of C-heap for expanding
-// the stack. The problem is quite ubiquitous and affects
-// a lot of code in the JVM. The prudent thing for GrowableArray
-// to do (for now) is to exit with an error. However, that may
-// be too draconian in some cases because the caller may be
-// able to recover without much harm. For such cases, we
-// should probably introduce a "soft_push" method which returns
-// an indication of success or failure with the assumption that
-// the caller may be able to recover from a failure; code in
-// the VM can then be changed, incrementally, to deal with such
-// failures where possible, thus incrementally hardening the VM
-// in such low resource situations.
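-//
-// An illustrative sketch (not part of these sources) of what such a
-// soft_push could look like; try_grow() is a hypothetical helper that
-// would report allocation failure instead of exiting:
-//
-//   template <typename E>
-//   bool GrowableArray<E>::soft_push(const E& elem) {
-//     if (_len == _max && !try_grow()) {
-//       return false; // out of C-heap; let the caller recover
-//     }
-//     push(elem);
-//     return true;
-//   }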
-void CMSCollector::preserve_mark_work(oop p, markWord m) {
-  _preserved_oop_stack.push(p);
-  _preserved_mark_stack.push(m);
-  assert(m == p->mark_raw(), "Mark word changed");
-  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
-         "bijection");
-}
-
-// Single threaded
-void CMSCollector::preserve_mark_if_necessary(oop p) {
-  markWord m = p->mark_raw();
-  if (p->mark_must_be_preserved(m)) {
-    preserve_mark_work(p, m);
-  }
-}
-
-void CMSCollector::par_preserve_mark_if_necessary(oop p) {
-  markWord m = p->mark_raw();
-  if (p->mark_must_be_preserved(m)) {
-    MutexLocker x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-    // Even though we read the mark word without holding
-    // the lock, we are assured that it will not change
-    // because we "own" this oop, so no other thread can
-    // be trying to push it on the overflow list; see
-    // the assertion in preserve_mark_work() that checks
-    // that m == p->mark_raw().
-    preserve_mark_work(p, m);
-  }
-}
-
-// We should be able to do this multi-threaded,
-// a chunk of stack being a task (this is
-// correct because each oop only ever appears
-// once in the overflow list). However, it's
-// not very easy to completely overlap this with
-// other operations, so will generally not be done
-// until all work's been completed. Because we
-// expect the preserved oop stack (set) to be small,
-// it's probably fine to do this single-threaded.
-// We can explore cleverer concurrent/overlapped/parallel
-// processing of preserved marks if we feel the
-// need for this in the future. Stack overflow should
-// be so rare in practice and, when it happens, its
-// effect on performance so great that this will
-// likely just be in the noise anyway.
-void CMSCollector::restore_preserved_marks_if_any() {
-  assert(SafepointSynchronize::is_at_safepoint(),
-         "world should be stopped");
-  assert(Thread::current()->is_ConcurrentGC_thread() ||
-         Thread::current()->is_VM_thread(),
-         "should be single-threaded");
-  assert(_preserved_oop_stack.size() == _preserved_mark_stack.size(),
-         "bijection");
-
-  while (!_preserved_oop_stack.is_empty()) {
-    oop p = _preserved_oop_stack.pop();
-    assert(oopDesc::is_oop(p), "Should be an oop");
-    assert(_span.contains(p), "oop should be in _span");
-    assert(p->mark_raw() == markWord::prototype(),
-           "Set when taken from overflow list");
-    markWord m = _preserved_mark_stack.pop();
-    p->set_mark_raw(m);
-  }
-  assert(_preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty(),
-         "stacks were cleared above");
-}
-
-#ifndef PRODUCT
-bool CMSCollector::no_preserved_marks() const {
-  return _preserved_mark_stack.is_empty() && _preserved_oop_stack.is_empty();
-}
-#endif
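-
-// Editorial sketch, not part of the original file: the mark-preservation
-// protocol above keeps two stacks in lockstep -- an object and its
-// displaced mark word are pushed and popped together, which is the
-// "bijection" the asserts check. A minimal stand-alone rendering:
-
-#include <cassert>
-#include <cstdint>
-#include <vector>
-
-struct PreservedMarksSketch {
-  std::vector<void*>    _oops;   // stand-in for Stack<oop, mtGC>
-  std::vector<uint64_t> _marks;  // stand-in for Stack<markWord, mtGC>
-
-  void preserve(void* p, uint64_t m) {
-    _oops.push_back(p);
-    _marks.push_back(m);
-    assert(_oops.size() == _marks.size());  // "bijection"
-  }
-
-  template <typename SetMark>  // SetMark: callable void(void*, uint64_t)
-  void restore_all(SetMark set_mark) {
-    while (!_oops.empty()) {
-      set_mark(_oops.back(), _marks.back());
-      _oops.pop_back();
-      _marks.pop_back();
-    }
-  }
-};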
-
-// Transfer some number of overflown objects to usual marking
-// stack. Return true if some objects were transferred.
-bool MarkRefsIntoAndScanClosure::take_from_overflow_list() {
-  size_t num = MIN2((size_t)(_mark_stack->capacity() - _mark_stack->length())/4,
-                    (size_t)ParGCDesiredObjsFromOverflowList);
-
-  bool res = _collector->take_from_overflow_list(num, _mark_stack);
-  assert(_collector->overflow_list_is_empty() || res,
-         "If list is not empty, we should have taken something");
-  assert(!res || !_mark_stack->isEmpty(),
-         "If we took something, it should now be on our stack");
-  return res;
-}
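-
-// Editorial sketch, not part of the original file: the quota computed
-// above is a quarter of the remaining mark-stack space, further capped
-// by the ParGCDesiredObjsFromOverflowList flag (whose default is not
-// assumed here). For example, with capacity 32768, length 28768 and a
-// cap of 500: min((32768 - 28768) / 4, 500) = min(1000, 500) = 500.
-
-#include <algorithm>
-#include <cstddef>
-
-inline std::size_t overflow_transfer_quota(std::size_t capacity,
-                                           std::size_t length,
-                                           std::size_t desired_from_list) {
-  return std::min((capacity - length) / 4, desired_from_list);
-}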
-
-size_t MarkDeadObjectsClosure::do_blk(HeapWord* addr) {
-  size_t res = _sp->block_size_no_stall(addr, _collector);
-  if (_sp->block_is_obj(addr)) {
-    if (_live_bit_map->isMarked(addr)) {
-      // It can't have been dead in a previous cycle
-      guarantee(!_dead_bit_map->isMarked(addr), "No resurrection!");
-    } else {
-      _dead_bit_map->mark(addr);      // mark the dead object
-    }
-  }
-  // Could be 0, if the block size could not be computed without stalling.
-  return res;
-}
-
-TraceCMSMemoryManagerStats::TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause): TraceMemoryManagerStats() {
-  GCMemoryManager* manager = CMSHeap::heap()->old_manager();
-  switch (phase) {
-    case CMSCollector::InitialMarking:
-      initialize(manager /* GC manager */ ,
-                 cause   /* cause of the GC */,
-                 true    /* allMemoryPoolsAffected */,
-                 true    /* recordGCBeginTime */,
-                 true    /* recordPreGCUsage */,
-                 false   /* recordPeakUsage */,
-                 false   /* recordPostGCusage */,
-                 true    /* recordAccumulatedGCTime */,
-                 false   /* recordGCEndTime */,
-                 false   /* countCollection */  );
-      break;
-
-    case CMSCollector::FinalMarking:
-      initialize(manager /* GC manager */ ,
-                 cause   /* cause of the GC */,
-                 true    /* allMemoryPoolsAffected */,
-                 false   /* recordGCBeginTime */,
-                 false   /* recordPreGCUsage */,
-                 false   /* recordPeakUsage */,
-                 false   /* recordPostGCusage */,
-                 true    /* recordAccumulatedGCTime */,
-                 false   /* recordGCEndTime */,
-                 false   /* countCollection */  );
-      break;
-
-    case CMSCollector::Sweeping:
-      initialize(manager /* GC manager */ ,
-                 cause   /* cause of the GC */,
-                 true    /* allMemoryPoolsAffected */,
-                 false   /* recordGCBeginTime */,
-                 false   /* recordPreGCUsage */,
-                 true    /* recordPeakUsage */,
-                 true    /* recordPostGCusage */,
-                 false   /* recordAccumulatedGCTime */,
-                 true    /* recordGCEndTime */,
-                 true    /* countCollection */  );
-      break;
-
-    default:
-      ShouldNotReachHere();
-  }
-}
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1796 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
-#define SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
-
-#include "gc/cms/cmsOopClosures.hpp"
-#include "gc/cms/gSpaceCounters.hpp"
-#include "gc/cms/yieldingWorkgroup.hpp"
-#include "gc/shared/cardGeneration.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcStats.hpp"
-#include "gc/shared/gcWhen.hpp"
-#include "gc/shared/generationCounters.hpp"
-#include "gc/shared/space.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "logging/log.hpp"
-#include "memory/iterator.hpp"
-#include "memory/virtualspace.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "services/memoryService.hpp"
-#include "utilities/bitMap.hpp"
-#include "utilities/stack.hpp"
-
-// ConcurrentMarkSweepGeneration is in support of a concurrent
-// mark-sweep old generation in the Detlefs-Printezis--Boehm-Demers-Shenker
-// style. We assume, for now, that this generation is always the
-// seniormost generation and, for simplicity in the first implementation,
-// that it is a single compactible space. Neither of these restrictions
-// is essential; both will be relaxed in the future when more time is
-// available to implement the greater generality (and there's a need
-// for it).
-//
-// Concurrent mode failures are currently handled by
-// means of a sliding mark-compact.
-
-class AdaptiveSizePolicy;
-class CMSCollector;
-class CMSConcMarkingTask;
-class CMSGCAdaptivePolicyCounters;
-class CMSTracer;
-class ConcurrentGCTimer;
-class ConcurrentMarkSweepGeneration;
-class ConcurrentMarkSweepPolicy;
-class ConcurrentMarkSweepThread;
-class CompactibleFreeListSpace;
-class FreeChunk;
-class ParNewGeneration;
-class PromotionInfo;
-class ScanMarkedObjectsAgainCarefullyClosure;
-class SerialOldTracer;
-
-// A generic CMS bit map. It's the basis for both the CMS marking bit map
-// as well as for the mod union table (in each case only a subset of the
-// methods are used). This is essentially a wrapper around the BitMap class,
-// with one bit per (1 << _shifter) HeapWords. (I.e., for the marking bit map
-// we have _shifter == 0, and for the mod union table we have
-// _shifter == CardTable::card_shift - LogHeapWordSize.)
-// XXX 64-bit issues in BitMap?
-class CMSBitMap {
-  friend class VMStructs;
-
-  HeapWord*    _bmStartWord;   // base address of range covered by map
-  size_t       _bmWordSize;    // map size (in #HeapWords covered)
-  const int    _shifter;       // shifts to convert HeapWord to bit position
-  VirtualSpace _virtual_space; // underlying the bit map
-  BitMapView   _bm;            // the bit map itself
-  Mutex* const _lock;          // mutex protecting _bm;
-
- public:
-  // constructor
-  CMSBitMap(int shifter, int mutex_rank, const char* mutex_name);
-
-  // allocates the actual storage for the map
-  bool allocate(MemRegion mr);
-  // field getter
-  Mutex* lock() const { return _lock; }
-  // locking verifier convenience function
-  void assert_locked() const PRODUCT_RETURN;
-
-  // inquiries
-  HeapWord* startWord()   const { return _bmStartWord; }
-  size_t    sizeInWords() const { return _bmWordSize;  }
-  size_t    sizeInBits()  const { return _bm.size();   }
-  // the following is one past the last word in space
-  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }
-
-  // reading marks
-  bool isMarked(HeapWord* addr) const;
-  bool par_isMarked(HeapWord* addr) const; // skips locking checks
-  bool isUnmarked(HeapWord* addr) const;
-  bool isAllClear() const;
-
-  // writing marks
-  void mark(HeapWord* addr);
-  // For marking by parallel GC threads;
-  // returns true if we did, false if another thread did
-  bool par_mark(HeapWord* addr);
-
-  void mark_range(MemRegion mr);
-  void par_mark_range(MemRegion mr);
-  void mark_large_range(MemRegion mr);
-  void par_mark_large_range(MemRegion mr);
-  void par_clear(HeapWord* addr); // For unmarking by parallel GC threads.
-  void clear_range(MemRegion mr);
-  void par_clear_range(MemRegion mr);
-  void clear_large_range(MemRegion mr);
-  void par_clear_large_range(MemRegion mr);
-  void clear_all();
-  void clear_all_incrementally();  // Not yet implemented!!
-
-  NOT_PRODUCT(
-    // checks the memory region for validity
-    void region_invariant(MemRegion mr);
-  )
-
-  // iteration
-  void iterate(BitMapClosure* cl) {
-    _bm.iterate(cl);
-  }
-  void iterate(BitMapClosure* cl, HeapWord* left, HeapWord* right);
-  void dirty_range_iterate_clear(MemRegionClosure* cl);
-  void dirty_range_iterate_clear(MemRegion mr, MemRegionClosure* cl);
-
-  // auxiliary support for iteration
-  HeapWord* getNextMarkedWordAddress(HeapWord* addr) const;
-  HeapWord* getNextMarkedWordAddress(HeapWord* start_addr,
-                                            HeapWord* end_addr) const;
-  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr) const;
-  HeapWord* getNextUnmarkedWordAddress(HeapWord* start_addr,
-                                              HeapWord* end_addr) const;
-  MemRegion getAndClearMarkedRegion(HeapWord* addr);
-  MemRegion getAndClearMarkedRegion(HeapWord* start_addr,
-                                           HeapWord* end_addr);
-
-  // conversion utilities
-  HeapWord* offsetToHeapWord(size_t offset) const;
-  size_t    heapWordToOffset(HeapWord* addr) const;
-  size_t    heapWordDiffToOffsetDiff(size_t diff) const;
-
-  void print_on_error(outputStream* st, const char* prefix) const;
-
-  // debugging
-  // is this address range covered by the bit-map?
-  NOT_PRODUCT(
-    bool covers(MemRegion mr) const;
-    bool covers(HeapWord* start, size_t size = 0) const;
-  )
-  void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
-};
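-
-// Editorial sketch, not part of the original header: the address<->bit
-// arithmetic described in the CMSBitMap comment above. Assuming 64-bit
-// HeapWords (LogHeapWordSize == 3) and 512-byte cards (card_shift == 9),
-// the marking map uses _shifter == 0 (one bit per HeapWord) and the mod
-// union table uses _shifter == 9 - 3 == 6 (one bit per 64 HeapWords).
-
-#include <cstddef>
-#include <cstdint>
-
-const int LogHeapWordSizeSketch = 3;  // assumption: 8-byte HeapWords
-
-inline std::size_t heap_word_to_offset(uintptr_t bm_start, uintptr_t addr,
-                                       int shifter) {
-  return ((addr - bm_start) >> LogHeapWordSizeSketch) >> shifter;
-}
-
-inline uintptr_t offset_to_heap_word(uintptr_t bm_start, std::size_t offset,
-                                     int shifter) {
-  return bm_start + ((uintptr_t(offset) << shifter) << LogHeapWordSizeSketch);
-}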
-
-// Represents a marking stack used by the CMS collector.
-// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
-class CMSMarkStack: public CHeapObj<mtGC>  {
-  friend class CMSCollector;   // To get at expansion stats further below.
-
-  VirtualSpace _virtual_space;  // Space for the stack
-  oop*   _base;      // Bottom of stack
-  size_t _index;     // One more than last occupied index
-  size_t _capacity;  // Max #elements
-  Mutex  _par_lock;  // An advisory lock used in case of parallel access
-  NOT_PRODUCT(size_t _max_depth;)  // Max depth plumbed during run
-
- protected:
-  size_t _hit_limit;      // We hit max stack size limit
-  size_t _failed_double;  // We failed expansion before hitting limit
-
- public:
-  CMSMarkStack():
-    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
-              Monitor::_safepoint_check_never),
-    _hit_limit(0),
-    _failed_double(0) {}
-
-  bool allocate(size_t size);
-
-  size_t capacity() const { return _capacity; }
-
-  oop pop() {
-    if (!isEmpty()) {
-      return _base[--_index] ;
-    }
-    return NULL;
-  }
-
-  bool push(oop ptr) {
-    if (isFull()) {
-      return false;
-    } else {
-      _base[_index++] = ptr;
-      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
-      return true;
-    }
-  }
-
-  bool isEmpty() const { return _index == 0; }
-  bool isFull()  const {
-    assert(_index <= _capacity, "buffer overflow");
-    return _index == _capacity;
-  }
-
-  size_t length() { return _index; }
-
-  // "Parallel versions" of some of the above
-  oop par_pop() {
-    // lock and pop
-    MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag);
-    return pop();
-  }
-
-  bool par_push(oop ptr) {
-    // lock and push
-    MutexLocker x(&_par_lock, Mutex::_no_safepoint_check_flag);
-    return push(ptr);
-  }
-
-  // Forcibly reset the stack, losing all of its contents.
-  void reset() {
-    _index = 0;
-  }
-
-  // Expand the stack, typically in response to an overflow condition.
-  void expand();
-
-  // Compute the least valued stack element.
-  oop least_value(HeapWord* low) {
-    HeapWord* least = low;
-    for (size_t i = 0; i < _index; i++) {
-      least = MIN2(least, (HeapWord*)_base[i]);
-    }
-    return (oop)least;
-  }
-
-  // Exposed here to allow stack expansion in the parallel case.
-  Mutex* par_lock() { return &_par_lock; }
-};
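-
-// Editorial sketch, not part of the original header: one plausible way a
-// caller can combine the push/expand pair above, treating a second failed
-// push as a genuine overflow:
-
-inline bool push_with_expand_sketch(CMSMarkStack* s, oop p) {
-  if (s->push(p)) {
-    return true;
-  }
-  s->expand();          // may fail to grow; see _failed_double above
-  return s->push(p);    // a second failure is a real overflow
-}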
-
-class CardTableRS;
-class CMSParGCThreadState;
-
-class ModUnionClosure: public MemRegionClosure {
- protected:
-  CMSBitMap* _t;
- public:
-  ModUnionClosure(CMSBitMap* t): _t(t) { }
-  void do_MemRegion(MemRegion mr);
-};
-
-class ModUnionClosurePar: public ModUnionClosure {
- public:
-  ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
-  void do_MemRegion(MemRegion mr);
-};
-
-// Survivor Chunk Array in support of parallelization of
-// Survivor Space rescan.
-class ChunkArray: public CHeapObj<mtGC> {
-  size_t _index;
-  size_t _capacity;
-  size_t _overflows;
-  HeapWord** _array;   // storage for array
-
- public:
-  ChunkArray() : _index(0), _capacity(0), _overflows(0), _array(NULL) {}
-  ChunkArray(HeapWord** a, size_t c):
-    _index(0), _capacity(c), _overflows(0), _array(a) {}
-
-  HeapWord** array() { return _array; }
-  void set_array(HeapWord** a) { _array = a; }
-
-  size_t capacity() { return _capacity; }
-  void set_capacity(size_t c) { _capacity = c; }
-
-  size_t end() {
-    assert(_index <= capacity(),
-           "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT "): out of bounds",
-           _index, _capacity);
-    return _index;
-  }  // exclusive
-
-  HeapWord* nth(size_t n) {
-    assert(n < end(), "Out of bounds access");
-    return _array[n];
-  }
-
-  void reset() {
-    _index = 0;
-    if (_overflows > 0) {
-      log_trace(gc)("CMS: ChunkArray[" SIZE_FORMAT "] overflowed " SIZE_FORMAT " times", _capacity, _overflows);
-    }
-    _overflows = 0;
-  }
-
-  void record_sample(HeapWord* p, size_t sz) {
-    // For now we do not do anything with the size
-    if (_index < _capacity) {
-      _array[_index++] = p;
-    } else {
-      ++_overflows;
-      assert(_index == _capacity,
-             "_index (" SIZE_FORMAT ") > _capacity (" SIZE_FORMAT
-             "): out of bounds at overflow#" SIZE_FORMAT,
-             _index, _capacity, _overflows);
-    }
-  }
-};
-
-//
-// Timing, allocation and promotion statistics for gc scheduling and incremental
-// mode pacing.  Most statistics are exponential averages.
-//
-class CMSStats {
- private:
-  ConcurrentMarkSweepGeneration* const _cms_gen;   // The cms (old) gen.
-
-  // The following are exponential averages with factor alpha (a
-  // percentage in [0, 100]):
-  //   avg = ((100 - alpha) * avg + alpha * cur_sample) / 100
-  //
-  //   The durations measure:  end_time[n] - start_time[n]
-  //   The periods measure:    start_time[n] - start_time[n-1]
-  //
-  // The cms period and duration include only concurrent collections; time spent
-  // in foreground cms collections due to System.gc() or because of a failure to
-  // keep up are not included.
-  //
-  // There are 3 alphas to "bootstrap" the statistics.  The _saved_alpha is the
-  // real value, but is used only after the first period.  A value of 100 is
-  // used for the first sample so it gets the entire weight.
-  unsigned int _saved_alpha; // 0-100
-  unsigned int _gc0_alpha;
-  unsigned int _cms_alpha;
-
-  double _gc0_duration;
-  double _gc0_period;
-  size_t _gc0_promoted;         // bytes promoted per gc0
-  double _cms_duration;
-  double _cms_duration_pre_sweep; // time from initiation to start of sweep
-  double _cms_period;
-  size_t _cms_allocated;        // bytes of direct allocation per gc0 period
-
-  // Timers.
-  elapsedTimer _cms_timer;
-  TimeStamp    _gc0_begin_time;
-  TimeStamp    _cms_begin_time;
-  TimeStamp    _cms_end_time;
-
-  // Snapshots of the amount used in the CMS generation.
-  size_t _cms_used_at_gc0_begin;
-  size_t _cms_used_at_gc0_end;
-  size_t _cms_used_at_cms_begin;
-
-  // Used to prevent the duty cycle from being reduced in the middle of a cms
-  // cycle.
-  bool _allow_duty_cycle_reduction;
-
-  enum {
-    _GC0_VALID = 0x1,
-    _CMS_VALID = 0x2,
-    _ALL_VALID = _GC0_VALID | _CMS_VALID
-  };
-
-  unsigned int _valid_bits;
-
- protected:
-  // In support of adjusting of cms trigger ratios based on history
-  // of concurrent mode failure.
-  double cms_free_adjustment_factor(size_t free) const;
-  void   adjust_cms_free_adjustment_factor(bool fail, size_t free);
-
- public:
-  CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
-           unsigned int alpha = CMSExpAvgFactor);
-
-  // Whether or not the statistics contain valid data; the higher level
-  // statistics cannot be computed until this returns true (they require at
-  // least one young gen and one cms cycle to have completed).
-  bool valid() const;
-
-  // Record statistics.
-  void record_gc0_begin();
-  void record_gc0_end(size_t cms_gen_bytes_used);
-  void record_cms_begin();
-  void record_cms_end();
-
-  // Allow management of the cms timer, which must be stopped/started around
-  // yield points.
-  elapsedTimer& cms_timer()     { return _cms_timer; }
-  void start_cms_timer()        { _cms_timer.start(); }
-  void stop_cms_timer()         { _cms_timer.stop(); }
-
-  // Basic statistics; units are seconds or bytes.
-  double gc0_period() const     { return _gc0_period; }
-  double gc0_duration() const   { return _gc0_duration; }
-  size_t gc0_promoted() const   { return _gc0_promoted; }
-  double cms_period() const          { return _cms_period; }
-  double cms_duration() const        { return _cms_duration; }
-  size_t cms_allocated() const       { return _cms_allocated; }
-
-  size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
-
-  // Seconds since the last background cms cycle began or ended.
-  double cms_time_since_begin() const;
-  double cms_time_since_end() const;
-
-  // Higher level statistics--caller must check that valid() returns true before
-  // calling.
-
-  // Returns bytes promoted per second of wall clock time.
-  double promotion_rate() const;
-
-  // Returns bytes directly allocated per second of wall clock time.
-  double cms_allocation_rate() const;
-
-  // Rate at which space in the cms generation is being consumed (sum of the
-  // above two).
-  double cms_consumption_rate() const;
-
-  // Returns an estimate of the number of seconds until the cms generation will
-  // fill up, assuming no collection work is done.
-  double time_until_cms_gen_full() const;
-
-  // Returns an estimate of the number of seconds remaining until
-  // the cms generation collection should start.
-  double time_until_cms_start() const;
-
-  // End of higher level statistics.
-
-  // Debugging.
-  void print_on(outputStream* st) const PRODUCT_RETURN;
-  void print() const { print_on(tty); }
-};
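-
-// Editorial sketch, not part of the original header: the exponential
-// average from the comment at the top of CMSStats, with alpha expressed
-// as a percentage in [0, 100]:
-
-inline double cms_exp_avg(double avg, double cur_sample, unsigned int alpha) {
-  return ((100.0 - alpha) * avg + alpha * cur_sample) / 100.0;
-}
-
-// For example, with alpha == 25, a previous average of 4.0 s and a new
-// sample of 8.0 s: (75 * 4.0 + 25 * 8.0) / 100 = 5.0 s. The higher-level
-// estimates combine such averages, e.g. time_until_cms_gen_full() is
-// roughly (free bytes) / cms_consumption_rate().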
-
-// A closure related to weak references processing which
-// we embed in the CMSCollector, since we need to pass
-// it to the reference processor for secondary filtering
-// of references based on reachability of referent;
-// see role of _is_alive_non_header closure in the
-// ReferenceProcessor class.
-// For objects in the CMS generation, this closure checks
-// if the object is "live" (reachable). Used in weak
-// reference processing.
-class CMSIsAliveClosure: public BoolObjectClosure {
-  const MemRegion  _span;
-  const CMSBitMap* _bit_map;
-
-  friend class CMSCollector;
- public:
-  CMSIsAliveClosure(MemRegion span,
-                    CMSBitMap* bit_map):
-    _span(span),
-    _bit_map(bit_map) {
-    assert(!span.is_empty(), "Empty span could spell trouble");
-  }
-
-  bool do_object_b(oop obj);
-};
-
-
-// Implements AbstractRefProcTaskExecutor for CMS.
-class CMSRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
-public:
-
-  CMSRefProcTaskExecutor(CMSCollector& collector)
-    : _collector(collector)
-  { }
-
-  // Executes a task using worker threads.
-  virtual void execute(ProcessTask& task, uint ergo_workers);
-private:
-  CMSCollector& _collector;
-};
-
-
-class CMSCollector: public CHeapObj<mtGC> {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepThread;
-  friend class ConcurrentMarkSweepGeneration;
-  friend class CompactibleFreeListSpace;
-  friend class CMSParMarkTask;
-  friend class CMSParInitialMarkTask;
-  friend class CMSParRemarkTask;
-  friend class CMSConcMarkingTask;
-  friend class CMSRefProcTaskProxy;
-  friend class CMSRefProcTaskExecutor;
-  friend class ScanMarkedObjectsAgainCarefullyClosure;  // for sampling eden
-  friend class SurvivorSpacePrecleanClosure;            // --- ditto -------
-  friend class PushOrMarkClosure;             // to access _restart_addr
-  friend class ParPushOrMarkClosure;          // to access _restart_addr
-  friend class MarkFromRootsClosure;          //  -- ditto --
-                                              // ... and for clearing cards
-  friend class ParMarkFromRootsClosure;       //  to access _restart_addr
-                                              // ... and for clearing cards
-  friend class ParConcMarkingClosure;         //  to access _restart_addr etc.
-  friend class MarkFromRootsVerifyClosure;    // to access _restart_addr
-  friend class PushAndMarkVerifyClosure;      //  -- ditto --
-  friend class MarkRefsIntoAndScanClosure;    // to access _overflow_list
-  friend class PushAndMarkClosure;            //  -- ditto --
-  friend class ParPushAndMarkClosure;         //  -- ditto --
-  friend class CMSKeepAliveClosure;           //  -- ditto --
-  friend class CMSDrainMarkingStackClosure;   //  -- ditto --
-  friend class CMSInnerParMarkAndPushClosure; //  -- ditto --
-  NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) //  assertion on _overflow_list
-  friend class ReleaseForegroundGC;  // to access _foregroundGCShouldWait
-  friend class VM_CMS_Operation;
-  friend class VM_CMS_Initial_Mark;
-  friend class VM_CMS_Final_Remark;
-  friend class TraceCMSMemoryManagerStats;
-
- private:
-  jlong _time_of_last_gc;
-  void update_time_of_last_gc(jlong now) {
-    _time_of_last_gc = now;
-  }
-
-  OopTaskQueueSet* _task_queues;
-
-  // Overflow list of grey objects, threaded through mark-word
-  // Manipulated with CAS in the parallel/multi-threaded case.
-  oopDesc* volatile _overflow_list;
-  // The following array-pair keeps track of mark words
-  // displaced for accommodating overflow list above.
-  // This code will likely be revisited under RFE#4922830.
-  Stack<oop, mtGC>      _preserved_oop_stack;
-  Stack<markWord, mtGC> _preserved_mark_stack;
-
-  // In support of multi-threaded concurrent phases
-  YieldingFlexibleWorkGang* _conc_workers;
-
-  // Performance Counters
-  CollectorCounters* _gc_counters;
-  CollectorCounters* _cgc_counters;
-
-  // Initialization Errors
-  bool _completed_initialization;
-
-  // In support of ExplicitGCInvokesConcurrent
-  static bool _full_gc_requested;
-  static GCCause::Cause _full_gc_cause;
-  unsigned int _collection_count_start;
-
-  // Should we unload classes this concurrent cycle?
-  bool _should_unload_classes;
-  unsigned int  _concurrent_cycles_since_last_unload;
-  unsigned int concurrent_cycles_since_last_unload() const {
-    return _concurrent_cycles_since_last_unload;
-  }
-  // Did we (allow) unload classes in the previous concurrent cycle?
-  bool unloaded_classes_last_cycle() const {
-    return concurrent_cycles_since_last_unload() == 0;
-  }
-  // Root scanning options for perm gen
-  int _roots_scanning_options;
-  int roots_scanning_options() const      { return _roots_scanning_options; }
-  void add_root_scanning_option(int o)    { _roots_scanning_options |= o;   }
-  void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o;  }
-
-  // Verification support
-  CMSBitMap     _verification_mark_bm;
-  void verify_after_remark_work_1();
-  void verify_after_remark_work_2();
-
-  // True if any verification flag is on.
-  bool _verifying;
-  bool verifying() const { return _verifying; }
-  void set_verifying(bool v) { _verifying = v; }
-
-  void set_did_compact(bool v);
-
-  // XXX Move these to CMSStats ??? FIX ME !!!
-  elapsedTimer _inter_sweep_timer;   // Time between sweeps
-  elapsedTimer _intra_sweep_timer;   // Time _in_ sweeps
-  // Padded decaying average estimates of the above
-  AdaptivePaddedAverage _inter_sweep_estimate;
-  AdaptivePaddedAverage _intra_sweep_estimate;
-
-  CMSTracer* _gc_tracer_cm;
-  ConcurrentGCTimer* _gc_timer_cm;
-
-  bool _cms_start_registered;
-
-  GCHeapSummary _last_heap_summary;
-  MetaspaceSummary _last_metaspace_summary;
-
-  void register_gc_start(GCCause::Cause cause);
-  void register_gc_end();
-  void save_heap_summary();
-  void report_heap_summary(GCWhen::Type when);
-
- protected:
-  ConcurrentMarkSweepGeneration* _cmsGen;  // Old gen (CMS)
-  MemRegion                      _span;    // Span covering above
-  CardTableRS*                   _ct;      // Card table
-
-  // CMS marking support structures
-  CMSBitMap     _markBitMap;
-  CMSBitMap     _modUnionTable;
-  CMSMarkStack  _markStack;
-
-  HeapWord*     _restart_addr; // In support of marking stack overflow
-  void          lower_restart_addr(HeapWord* low);
-
-  // Counters in support of marking stack / work queue overflow handling:
-  // a non-zero value indicates certain types of overflow events during
-  // the current CMS cycle and could lead to stack resizing efforts at
-  // an opportune future time.
-  size_t        _ser_pmc_preclean_ovflw;
-  size_t        _ser_pmc_remark_ovflw;
-  size_t        _par_pmc_remark_ovflw;
-  size_t        _ser_kac_preclean_ovflw;
-  size_t        _ser_kac_ovflw;
-  size_t        _par_kac_ovflw;
-  NOT_PRODUCT(ssize_t _num_par_pushes;)
-
-  // ("Weak") Reference processing support.
-  SpanSubjectToDiscoveryClosure _span_based_discoverer;
-  ReferenceProcessor*           _ref_processor;
-  CMSIsAliveClosure             _is_alive_closure;
-  // Keep this textually after _markBitMap and _span; c'tor dependency.
-
-  ConcurrentMarkSweepThread*     _cmsThread;   // The thread doing the work
-  ModUnionClosurePar _modUnionClosurePar;
-
-  // CMS abstract state machine
-  // initial_state: Idling
-  // next_state(Idling)            = {Marking}
-  // next_state(Marking)           = {Precleaning, Sweeping}
-  // next_state(Precleaning)       = {AbortablePreclean, FinalMarking}
-  // next_state(AbortablePreclean) = {FinalMarking}
-  // next_state(FinalMarking)      = {Sweeping}
-  // next_state(Sweeping)          = {Resizing}
-  // next_state(Resizing)          = {Resetting}
-  // next_state(Resetting)         = {Idling}
-  // The numeric values below are chosen so that:
-  // . _collectorState <= Idling ==  post-sweep && pre-mark
-  // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
-  //                                            precleaning || abortablePreclean
- public:
-  enum CollectorState {
-    Resizing            = 0,
-    Resetting           = 1,
-    Idling              = 2,
-    InitialMarking      = 3,
-    Marking             = 4,
-    Precleaning         = 5,
-    AbortablePreclean   = 6,
-    FinalMarking        = 7,
-    Sweeping            = 8
-  };
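-
-  // Editorial sketch, not part of the original header: the transition
-  // table from the state-machine comment above, as a checkable predicate.
-  static bool legal_transition_sketch(CollectorState from, CollectorState to) {
-    switch (from) {
-      case Idling:            return to == Marking;
-      case Marking:           return to == Precleaning || to == Sweeping;
-      case Precleaning:       return to == AbortablePreclean || to == FinalMarking;
-      case AbortablePreclean: return to == FinalMarking;
-      case FinalMarking:      return to == Sweeping;
-      case Sweeping:          return to == Resizing;
-      case Resizing:          return to == Resetting;
-      case Resetting:         return to == Idling;
-      default:                return false;
-    }
-  }
-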
- protected:
-  static CollectorState _collectorState;
-
-  // State related to prologue/epilogue invocation for my generations
-  bool _between_prologue_and_epilogue;
-
-  // Signaling/State related to coordination between fore- and background GC
-  // Note: When the baton has been passed from background GC to foreground GC,
-  // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
-  static bool _foregroundGCIsActive;    // true iff foreground collector is active or
-                                 // wants to go active
-  static bool _foregroundGCShouldWait;  // true iff background GC is active and has not
-                                 // yet passed the baton to the foreground GC
-
-  // Support for CMSScheduleRemark (abortable preclean)
-  bool _abort_preclean;
-  bool _start_sampling;
-
-  int    _numYields;
-  size_t _numDirtyCards;
-  size_t _sweep_count;
-
-  // Occupancy used for bootstrapping stats
-  double _bootstrap_occupancy;
-
-  // Timer
-  elapsedTimer _timer;
-
-  // Timing, allocation and promotion statistics, used for scheduling.
-  CMSStats      _stats;
-
-  enum CMS_op_type {
-    CMS_op_checkpointRootsInitial,
-    CMS_op_checkpointRootsFinal
-  };
-
-  void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
-  bool stop_world_and_do(CMS_op_type op);
-
-  OopTaskQueueSet* task_queues() { return _task_queues; }
-  YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
-
-  // Support for parallelizing Eden rescan in CMS remark phase
-  void sample_eden(); // ... sample Eden space top
-
- private:
-  // Support for parallelizing young gen rescan in CMS remark phase
-  ParNewGeneration* _young_gen;
-
-  HeapWord* volatile* _top_addr;    // ... Top of Eden
-  HeapWord**          _end_addr;    // ... End of Eden
-  Mutex*              _eden_chunk_lock;
-  HeapWord**          _eden_chunk_array; // ... Eden partitioning array
-  size_t              _eden_chunk_index; // ... top (exclusive) of array
-  size_t              _eden_chunk_capacity;  // ... max entries in array
-
-  // Support for parallelizing survivor space rescan
-  HeapWord** _survivor_chunk_array;
-  size_t     _survivor_chunk_index;
-  size_t     _survivor_chunk_capacity;
-  size_t*    _cursor;
-  ChunkArray* _survivor_plab_array;
-
-  // Support for marking stack overflow handling
-  bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
-  bool par_take_from_overflow_list(size_t num,
-                                   OopTaskQueue* to_work_q,
-                                   int no_of_gc_threads);
-  void push_on_overflow_list(oop p);
-  void par_push_on_overflow_list(oop p);
-  // The following is obviously not, in general, "MT-stable"
-  bool overflow_list_is_empty() const;
-
-  void preserve_mark_if_necessary(oop p);
-  void par_preserve_mark_if_necessary(oop p);
-  void preserve_mark_work(oop p, markWord m);
-  void restore_preserved_marks_if_any();
-  NOT_PRODUCT(bool no_preserved_marks() const;)
-  // In support of testing overflow code
-  NOT_PRODUCT(int _overflow_counter;)
-  NOT_PRODUCT(bool simulate_overflow();)       // Sequential
-  NOT_PRODUCT(bool par_simulate_overflow();)   // MT version
-
-  // CMS work methods
-  void checkpointRootsInitialWork(); // Initial checkpoint work
-
-  // A return value of false indicates failure due to stack overflow
-  bool markFromRootsWork();  // Concurrent marking work
-
- public:   // FIX ME!!! only for testing
-  bool do_marking_st();      // Single-threaded marking
-  bool do_marking_mt();      // Multi-threaded  marking
-
- private:
-
-  // Concurrent precleaning work
-  size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* old_gen,
-                                  ScanMarkedObjectsAgainCarefullyClosure* cl);
-  size_t preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
-                             ScanMarkedObjectsAgainCarefullyClosure* cl);
-  // Does precleaning work, returning a quantity indicative of
-  // the amount of "useful work" done.
-  size_t preclean_work(bool clean_refs, bool clean_survivors);
-  void preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
-  void abortable_preclean(); // Preclean while looking for possible abort
-  void initialize_sequential_subtasks_for_young_gen_rescan(int i);
-  // Helper function for above; merge-sorts the per-thread plab samples
-  void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
-  // Resets (i.e. clears) the per-thread plab sample vectors
-  void reset_survivor_plab_arrays();
-
-  // Final (second) checkpoint work
-  void checkpointRootsFinalWork();
-  // Work routine for parallel version of remark
-  void do_remark_parallel();
-  // Work routine for non-parallel version of remark
-  void do_remark_non_parallel();
-  // Reference processing work routine (during second checkpoint)
-  void refProcessingWork();
-
-  // Concurrent sweeping work
-  void sweepWork(ConcurrentMarkSweepGeneration* old_gen);
-
-  // Concurrent resetting of support data structures
-  void reset_concurrent();
-  // Resetting of support data structures from a STW full GC
-  void reset_stw();
-
-  // Clear _expansion_cause fields of constituent generations
-  void clear_expansion_cause();
-
-  // An auxiliary method used to record the ends of
-  // used regions of each generation to limit the extent of sweep
-  void save_sweep_limits();
-
-  // A work method used by the foreground collector to do
-  // a mark-sweep-compact.
-  void do_compaction_work(bool clear_all_soft_refs);
-
-  // Work methods for reporting concurrent mode interruption or failure
-  bool is_external_interruption();
-  void report_concurrent_mode_interruption();
-
-  // If the background GC is active, acquire control from the background
-  // GC and do the collection.
-  void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
-
-  // For synchronizing the passing of control from background to foreground
-  // GC.  waitForForegroundGC() is called by the background
-  // collector.  If it had to wait for a foreground collection,
-  // it returns true and the background collection should assume
-  // that the collection was finished by the foreground
-  // collector.
-  bool waitForForegroundGC();
-
-  size_t block_size_using_printezis_bits(HeapWord* addr) const;
-  size_t block_size_if_printezis_bits(HeapWord* addr) const;
-  HeapWord* next_card_start_after_block(HeapWord* addr) const;
-
-  void setup_cms_unloading_and_verification_state();
- public:
-  CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
-               CardTableRS*                   ct);
-  ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
-
-  MemRegion ref_processor_span() const { return _span_based_discoverer.span(); }
-  ReferenceProcessor* ref_processor() { return _ref_processor; }
-  void ref_processor_init();
-
-  Mutex* bitMapLock()        const { return _markBitMap.lock();    }
-  static CollectorState abstract_state() { return _collectorState;  }
-
-  bool should_abort_preclean() const; // Whether preclean should be aborted.
-  size_t get_eden_used() const;
-  size_t get_eden_capacity() const;
-
-  ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
-
-  // Locking checks
-  NOT_PRODUCT(static bool have_cms_token();)
-
-  bool shouldConcurrentCollect();
-
-  void collect(bool   full,
-               bool   clear_all_soft_refs,
-               size_t size,
-               bool   tlab);
-  void collect_in_background(GCCause::Cause cause);
-
-  // In support of ExplicitGCInvokesConcurrent
-  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
-  // Should we unload classes in a particular concurrent cycle?
-  bool should_unload_classes() const {
-    return _should_unload_classes;
-  }
-  void update_should_unload_classes();
-
-  void direct_allocated(HeapWord* start, size_t size);
-
-  // Object is dead if not marked and current phase is sweeping.
-  bool is_dead_obj(oop obj) const;
-
-  // After a promotion (of "start"), do any necessary marking.
-  // If "par", then it's being done by a parallel GC thread.
-  // The last two args indicate if we need precise marking
-  // and if so the size of the object so it can be dirtied
-  // in its entirety.
-  void promoted(bool par, HeapWord* start,
-                bool is_obj_array, size_t obj_size);
-
-  void getFreelistLocks() const;
-  void releaseFreelistLocks() const;
-  bool haveFreelistLocks() const;
-
-  // Adjust size of underlying generation
-  void compute_new_size();
-
-  // GC prologue and epilogue
-  void gc_prologue(bool full);
-  void gc_epilogue(bool full);
-
-  jlong time_of_last_gc(jlong now) {
-    if (_collectorState <= Idling) {
-      // gc not in progress
-      return _time_of_last_gc;
-    } else {
-      // collection in progress
-      return now;
-    }
-  }
-
-  // Support for parallel remark of survivor space
-  void* get_data_recorder(int thr_num);
-  void sample_eden_chunk();
-
-  CMSBitMap* markBitMap()  { return &_markBitMap; }
-  void directAllocated(HeapWord* start, size_t size);
-
-  // Main CMS steps and related support
-  void checkpointRootsInitial();
-  bool markFromRoots();  // a return value of false indicates failure
-                         // due to stack overflow
-  void preclean();
-  void checkpointRootsFinal();
-  void sweep();
-
-  // Check that the currently executing thread is the expected
-  // one (foreground collector or background collector).
-  static void check_correct_thread_executing() PRODUCT_RETURN;
-
-  NOT_PRODUCT(bool is_cms_reachable(HeapWord* addr);)
-
-  // Performance Counter Support
-  CollectorCounters* counters()     { return _gc_counters; }
-  CollectorCounters* cgc_counters() { return _cgc_counters; }
-
-  // Timer stuff
-  void    startTimer() { assert(!_timer.is_active(), "Error"); _timer.start();   }
-  void    stopTimer()  { assert( _timer.is_active(), "Error"); _timer.stop();    }
-  void    resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset();   }
-  jlong   timerTicks() { assert(!_timer.is_active(), "Error"); return _timer.ticks(); }
-
-  int  yields()          { return _numYields; }
-  void resetYields()     { _numYields = 0;    }
-  void incrementYields() { _numYields++;      }
-  void resetNumDirtyCards()               { _numDirtyCards = 0; }
-  void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
-  size_t  numDirtyCards()                 { return _numDirtyCards; }
-
-  static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
-  static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
-  static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
-  static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
-  size_t sweep_count() const             { return _sweep_count; }
-  void   increment_sweep_count()         { _sweep_count++; }
-
-  // Timers/stats for gc scheduling and incremental mode pacing.
-  CMSStats& stats() { return _stats; }
-
-  // Adaptive size policy
-  AdaptiveSizePolicy* size_policy();
-
-  static void print_on_error(outputStream* st);
-
-  // Debugging
-  void verify();
-  bool verify_after_remark();
-  void verify_ok_to_terminate() const PRODUCT_RETURN;
-  void verify_work_stacks_empty() const PRODUCT_RETURN;
-  void verify_overflow_empty() const PRODUCT_RETURN;
-
-  // Convenience methods in support of debugging
-  static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
-  HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
-
-  // Accessors
-  CMSMarkStack* verification_mark_stack() { return &_markStack; }
-  CMSBitMap*    verification_mark_bm()    { return &_verification_mark_bm; }
-
-  // Initialization errors
-  bool completed_initialization() { return _completed_initialization; }
-
-  void print_eden_and_survivor_chunk_arrays();
-
-  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
-};
-
-class CMSExpansionCause : public AllStatic  {
- public:
-  enum Cause {
-    _no_expansion,
-    _satisfy_free_ratio,
-    _satisfy_promotion,
-    _satisfy_allocation,
-    _allocate_par_lab,
-    _allocate_par_spooling_space,
-    _adaptive_size_policy
-  };
-  // Return a string describing the cause of the expansion.
-  static const char* to_string(CMSExpansionCause::Cause cause);
-};
-
-class ConcurrentMarkSweepGeneration: public CardGeneration {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepThread;
-  friend class ConcurrentMarkSweep;
-  friend class CMSCollector;
- protected:
-  static CMSCollector*       _collector; // the collector that collects us
-  CompactibleFreeListSpace*  _cmsSpace;  // underlying space (only one for now)
-
-  // Performance Counters
-  GenerationCounters*      _gen_counters;
-  GSpaceCounters*          _space_counters;
-
-  // Words directly allocated, used by CMSStats.
-  size_t _direct_allocated_words;
-
-  // Non-product stat counters
-  NOT_PRODUCT(
-    size_t _numObjectsPromoted;
-    size_t _numWordsPromoted;
-    size_t _numObjectsAllocated;
-    size_t _numWordsAllocated;
-  )
-
-  // Used for sizing decisions
-  bool _incremental_collection_failed;
-  bool incremental_collection_failed() {
-    return _incremental_collection_failed;
-  }
-  void set_incremental_collection_failed() {
-    _incremental_collection_failed = true;
-  }
-  void clear_incremental_collection_failed() {
-    _incremental_collection_failed = false;
-  }
-
-  // accessors
-  void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
-  CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
-
-  // Accessing spaces
-  CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }
-
- private:
-  // For parallel young-gen GC support.
-  CMSParGCThreadState** _par_gc_thread_states;
-
-  // Reason generation was expanded
-  CMSExpansionCause::Cause _expansion_cause;
-
-  // In support of MinChunkSize being larger than min object size
-  const double _dilatation_factor;
-
-  // True if a compacting collection was done.
-  bool _did_compact;
-  bool did_compact() { return _did_compact; }
-
-  // Fraction of current occupancy at which to start a CMS collection which
-  // will collect this generation (at least).
-  double _initiating_occupancy;
-
- protected:
-  // Shrink the generation by the specified size, by shrinking the free list
-  void shrink_free_list_by(size_t bytes);
-
-  // Update statistics for GC
-  virtual void update_gc_stats(Generation* current_generation, bool full);
-
-  // Maximum available space in the generation (including uncommitted)
-  // space.
-  size_t max_available() const;
-
-  // getter and initializer for _initiating_occupancy field.
-  double initiating_occupancy() const { return _initiating_occupancy; }
-  void   init_initiating_occupancy(intx io, uintx tr);
-
-  void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
-
-  void assert_correct_size_change_locking();
-
- public:
-  ConcurrentMarkSweepGeneration(ReservedSpace rs,
-                                size_t initial_byte_size,
-                                size_t min_byte_size,
-                                size_t max_byte_size,
-                                CardTableRS* ct);
-
-  // Accessors
-  CMSCollector* collector() const { return _collector; }
-  static void set_collector(CMSCollector* collector) {
-    assert(_collector == NULL, "already set");
-    _collector = collector;
-  }
-  CompactibleFreeListSpace*  cmsSpace() const { return _cmsSpace;  }
-
-  Mutex* freelistLock() const;
-
-  virtual Generation::Name kind() { return Generation::ConcurrentMarkSweep; }
-
-  void set_did_compact(bool v) { _did_compact = v; }
-
-  bool refs_discovery_is_atomic() const { return false; }
-  bool refs_discovery_is_mt()     const {
-    // Note: CMS does MT-discovery during the parallel-remark
-    // phases. Use ReferenceProcessorMTMutator to make refs
-    // discovery MT-safe during such phases or other parallel
-    // discovery phases in the future. This may all go away
-    // if/when we decide that refs discovery is sufficiently
-    // rare that the cost of the CAS's involved is in the
-    // noise. That's a measurement that should be done, and
-    // the code simplified if that turns out to be the case.
-    return ConcGCThreads > 1;
-  }
-
-  // Override
-  virtual void ref_processor_init();
-
-  void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
-
-  // Space enquiries
-  double occupancy() const { return ((double)used())/((double)capacity()); }
-  size_t contiguous_available() const;
-  size_t unsafe_max_alloc_nogc() const;
-  size_t used_stable() const;
-
-  // over-rides
-  MemRegion used_region_at_save_marks() const;
-
-  // Adjust quantities in the generation affected by
-  // the compaction.
-  void reset_after_compaction();
-
-  // Allocation support
-  HeapWord* allocate(size_t size, bool tlab);
-  HeapWord* have_lock_and_allocate(size_t size, bool tlab);
-  oop       promote(oop obj, size_t obj_size);
-  HeapWord* par_allocate(size_t size, bool tlab) {
-    return allocate(size, tlab);
-  }
-
-
-  // Used by CMSStats to track direct allocation.  The value is sampled and
-  // reset after each young gen collection.
-  size_t direct_allocated_words() const { return _direct_allocated_words; }
-  void reset_direct_allocated_words()   { _direct_allocated_words = 0; }
-
-  // Overrides for parallel promotion.
-  virtual oop par_promote(int thread_num,
-                          oop obj, markWord m, size_t word_sz);
-  virtual void par_promote_alloc_done(int thread_num);
-  virtual void par_oop_since_save_marks_iterate_done(int thread_num);
-
-  virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
-
-  // Inform this (old) generation that a promotion failure was
-  // encountered during a collection of the young generation.
-  virtual void promotion_failure_occurred();
-
-  bool should_collect(bool full, size_t size, bool tlab);
-  virtual bool should_concurrent_collect() const;
-  virtual bool is_too_full() const;
-  void collect(bool   full,
-               bool   clear_all_soft_refs,
-               size_t size,
-               bool   tlab);
-
-  HeapWord* expand_and_allocate(size_t word_size,
-                                bool tlab,
-                                bool parallel = false);
-
-  // GC prologue and epilogue
-  void gc_prologue(bool full);
-  void gc_prologue_work(bool full, bool registerClosure,
-                        ModUnionClosure* modUnionClosure);
-  void gc_epilogue(bool full);
-  void gc_epilogue_work(bool full);
-
-  // Time since last GC of this generation
-  jlong time_of_last_gc(jlong now) {
-    return collector()->time_of_last_gc(now);
-  }
-  void update_time_of_last_gc(jlong now) {
-    collector()->update_time_of_last_gc(now);
-  }
-
-  // Allocation failure
-  void shrink(size_t bytes);
-  HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
-  bool expand_and_ensure_spooling_space(PromotionInfo* promo);
-
-  // Iteration support and related enquiries
-  void save_marks();
-  bool no_allocs_since_save_marks();
-
-  // Iteration support specific to CMS generations
-  void save_sweep_limit();
-
-  // More iteration support
-  virtual void oop_iterate(OopIterateClosure* cl);
-  virtual void safe_object_iterate(ObjectClosure* cl);
-  virtual void object_iterate(ObjectClosure* cl);
-
-  template <typename OopClosureType>
-  void oop_since_save_marks_iterate(OopClosureType* cl);
-
-  // Smart allocation  XXX -- move to CFLSpace?
-  void setNearLargestChunk();
-  bool isNearLargestChunk(HeapWord* addr);
-
-  // Get the chunk at the end of the space.  Delegates to
-  // the space.
-  FreeChunk* find_chunk_at_end();
-
-  void post_compact();
-
-  // Debugging
-  void prepare_for_verify();
-  void verify();
-  void print_statistics()               PRODUCT_RETURN;
-
-  // Performance Counters support
-  virtual void update_counters();
-  virtual void update_counters(size_t used);
-  void initialize_performance_counters(size_t min_old_size, size_t max_old_size);
-  CollectorCounters* counters()  { return collector()->counters(); }
-
-  // Support for parallel remark of survivor space
-  void* get_data_recorder(int thr_num) {
-    // Delegate to the collector
-    return collector()->get_data_recorder(thr_num);
-  }
-  void sample_eden_chunk() {
-    // Delegate to the collector
-    return collector()->sample_eden_chunk();
-  }
-
-  // Printing
-  const char* name() const;
-  virtual const char* short_name() const { return "CMS"; }
-  void        print() const;
-
-  // Resize the generation after a compacting GC.  The
-  // generation can be treated as a contiguous space
-  // after the compaction.
-  virtual void compute_new_size();
-  // Resize the generation after a non-compacting
-  // collection.
-  void compute_new_size_free_list();
-};
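-
-// Editorial sketch, not part of the original header: the occupancy
-// trigger implied by _initiating_occupancy above -- a concurrent cycle
-// becomes a candidate once occupancy() reaches the initiating fraction
-// (the real shouldConcurrentCollect() heuristic weighs more inputs):
-
-inline bool occupancy_triggers_cms_sketch(double used_bytes,
-                                          double capacity_bytes,
-                                          double initiating_occupancy) {
-  return (used_bytes / capacity_bytes) >= initiating_occupancy;
-}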
-
-//
-// Closures of various sorts used by CMS to accomplish its work
-//
-
-// This closure is used to do concurrent marking from the roots
-// following the first checkpoint.
-class MarkFromRootsClosure: public BitMapClosure {
-  CMSCollector*  _collector;
-  MemRegion      _span;
-  CMSBitMap*     _bitMap;
-  CMSBitMap*     _mut;
-  CMSMarkStack*  _markStack;
-  bool           _yield;
-  int            _skipBits;
-  HeapWord*      _finger;
-  HeapWord*      _threshold;
-  DEBUG_ONLY(bool _verifying;)
-
- public:
-  MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
-                       CMSBitMap* bitMap,
-                       CMSMarkStack*  markStack,
-                       bool should_yield, bool verifying = false);
-  bool do_bit(size_t offset);
-  void reset(HeapWord* addr);
-  inline void do_yield_check();
-
- private:
-  void scanOopsInOop(HeapWord* ptr);
-  void do_yield_work();
-};
-
-// This closure is used to do concurrent multi-threaded
-// marking from the roots following the first checkpoint.
-// XXX This should really be a subclass of the serial version
-// above, but I have not had the time to refactor things cleanly.
-class ParMarkFromRootsClosure: public BitMapClosure {
-  CMSCollector*  _collector;
-  MemRegion      _whole_span;
-  MemRegion      _span;
-  CMSBitMap*     _bit_map;
-  CMSBitMap*     _mut;
-  OopTaskQueue*  _work_queue;
-  CMSMarkStack*  _overflow_stack;
-  int            _skip_bits;
-  HeapWord*      _finger;
-  HeapWord*      _threshold;
-  CMSConcMarkingTask* _task;
- public:
-  ParMarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
-                          MemRegion span,
-                          CMSBitMap* bit_map,
-                          OopTaskQueue* work_queue,
-                          CMSMarkStack*  overflow_stack);
-  bool do_bit(size_t offset);
-  inline void do_yield_check();
-
- private:
-  void scan_oops_in_oop(HeapWord* ptr);
-  void do_yield_work();
-  bool get_work_from_overflow_stack();
-};
-
-// The following closures are used to do certain kinds of verification of
-// CMS marking.
-class PushAndMarkVerifyClosure: public MetadataVisitingOopIterateClosure {
-  CMSCollector*    _collector;
-  MemRegion        _span;
-  CMSBitMap*       _verification_bm;
-  CMSBitMap*       _cms_bm;
-  CMSMarkStack*    _mark_stack;
- protected:
-  void do_oop(oop p);
-  template <class T> void do_oop_work(T *p);
-
- public:
-  PushAndMarkVerifyClosure(CMSCollector* cms_collector,
-                           MemRegion span,
-                           CMSBitMap* verification_bm,
-                           CMSBitMap* cms_bm,
-                           CMSMarkStack*  mark_stack);
-  void do_oop(oop* p);
-  void do_oop(narrowOop* p);
-
-  // Deal with a stack overflow condition
-  void handle_stack_overflow(HeapWord* lost);
-};
-
-class MarkFromRootsVerifyClosure: public BitMapClosure {
-  CMSCollector*  _collector;
-  MemRegion      _span;
-  CMSBitMap*     _verification_bm;
-  CMSBitMap*     _cms_bm;
-  CMSMarkStack*  _mark_stack;
-  HeapWord*      _finger;
-  PushAndMarkVerifyClosure _pam_verify_closure;
- public:
-  MarkFromRootsVerifyClosure(CMSCollector* collector, MemRegion span,
-                             CMSBitMap* verification_bm,
-                             CMSBitMap* cms_bm,
-                             CMSMarkStack*  mark_stack);
-  bool do_bit(size_t offset);
-  void reset(HeapWord* addr);
-};
-
-
-// This closure is used to check that a certain set of bits is
-// "empty" (i.e. the bit vector doesn't have any 1-bits).
-class FalseBitMapClosure: public BitMapClosure {
- public:
-  bool do_bit(size_t offset) {
-    guarantee(false, "Should not have a 1 bit");
-    return true;
-  }
-};
-
-// A version of ObjectClosure with "memory" (see _previous_address below)
-class UpwardsObjectClosure: public BoolObjectClosure {
-  HeapWord* _previous_address;
- public:
-  UpwardsObjectClosure() : _previous_address(NULL) { }
-  void set_previous(HeapWord* addr) { _previous_address = addr; }
-  HeapWord* previous()              { return _previous_address; }
-  // A return value of "true" can be used by the caller to decide
-  // if this object's end should *NOT* be recorded in
-  // _previous_address above.
-  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
-};
-
-// This closure is used during the second checkpointing phase
-// to rescan the marked objects on the dirty cards in the mod
-// union table and the card table proper. It's invoked via
-// MarkFromDirtyCardsClosure below. It uses either
-// [Par_]MarkRefsIntoAndScanClosure (Par_ in the parallel case)
-// declared in genOopClosures.hpp to accomplish some of its work.
-// In the parallel case the bitMap is shared, so access to
-// it needs to be suitably synchronized for updates by embedded
-// closures that update it; however, this closure itself only
-// reads the bit_map and, because it is idempotent, is immune to
-// reading stale values.
-class ScanMarkedObjectsAgainClosure: public UpwardsObjectClosure {
-  #ifdef ASSERT
-    CMSCollector*          _collector;
-    MemRegion              _span;
-    union {
-      CMSMarkStack*        _mark_stack;
-      OopTaskQueue*        _work_queue;
-    };
-  #endif // ASSERT
-  bool                       _parallel;
-  CMSBitMap*                 _bit_map;
-  union {
-    MarkRefsIntoAndScanClosure*    _scan_closure;
-    ParMarkRefsIntoAndScanClosure* _par_scan_closure;
-  };
-
- public:
-  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
-                                MemRegion span,
-                                ReferenceProcessor* rp,
-                                CMSBitMap* bit_map,
-                                CMSMarkStack*  mark_stack,
-                                MarkRefsIntoAndScanClosure* cl):
-    #ifdef ASSERT
-      _collector(collector),
-      _span(span),
-      _mark_stack(mark_stack),
-    #endif // ASSERT
-    _parallel(false),
-    _bit_map(bit_map),
-    _scan_closure(cl) { }
-
-  ScanMarkedObjectsAgainClosure(CMSCollector* collector,
-                                MemRegion span,
-                                ReferenceProcessor* rp,
-                                CMSBitMap* bit_map,
-                                OopTaskQueue* work_queue,
-                                ParMarkRefsIntoAndScanClosure* cl):
-    #ifdef ASSERT
-      _collector(collector),
-      _span(span),
-      _work_queue(work_queue),
-    #endif // ASSERT
-    _parallel(true),
-    _bit_map(bit_map),
-    _par_scan_closure(cl) { }
-
-  bool do_object_b(oop obj) {
-    guarantee(false, "Call do_object_b(oop, MemRegion) form instead");
-    return false;
-  }
-  bool do_object_bm(oop p, MemRegion mr);
-};
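-
-// Editorial sketch, not part of the original header: the class above
-// discriminates an anonymous union with its _parallel flag. The same
-// pattern in miniature, with illustrative worker types:
-
-struct SerialWorkerSketch   { void scan(int) { /* serial work */ } };
-struct ParallelWorkerSketch { void scan(int) { /* parallel work */ } };
-
-class DiscriminatedDispatchSketch {
-  bool _parallel;
-  union {
-    SerialWorkerSketch*   _serial;
-    ParallelWorkerSketch* _par;
-  };
- public:
-  explicit DiscriminatedDispatchSketch(SerialWorkerSketch* s)
-    : _parallel(false), _serial(s) {}
-  explicit DiscriminatedDispatchSketch(ParallelWorkerSketch* p)
-    : _parallel(true), _par(p) {}
-  void scan(int x) {
-    if (_parallel) _par->scan(x);
-    else           _serial->scan(x);
-  }
-};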
-
-// This closure is used during the second checkpointing phase
-// to rescan the marked objects on the dirty cards in the mod
-// union table and the card table proper. It invokes
-// ScanMarkedObjectsAgainClosure above to accomplish much of its work.
-// In the parallel case, the bit map is shared and requires
-// synchronized access.
-class MarkFromDirtyCardsClosure: public MemRegionClosure {
-  CompactibleFreeListSpace*      _space;
-  ScanMarkedObjectsAgainClosure  _scan_cl;
-  size_t                         _num_dirty_cards;
-
- public:
-  MarkFromDirtyCardsClosure(CMSCollector* collector,
-                            MemRegion span,
-                            CompactibleFreeListSpace* space,
-                            CMSBitMap* bit_map,
-                            CMSMarkStack* mark_stack,
-                            MarkRefsIntoAndScanClosure* cl):
-    _space(space),
-    _scan_cl(collector, span, collector->ref_processor(), bit_map,
-                 mark_stack, cl),
-    _num_dirty_cards(0) { }
-
-  MarkFromDirtyCardsClosure(CMSCollector* collector,
-                            MemRegion span,
-                            CompactibleFreeListSpace* space,
-                            CMSBitMap* bit_map,
-                            OopTaskQueue* work_queue,
-                            ParMarkRefsIntoAndScanClosure* cl):
-    _space(space),
-    _scan_cl(collector, span, collector->ref_processor(), bit_map,
-             work_queue, cl),
-    _num_dirty_cards(0) { }
-
-  void do_MemRegion(MemRegion mr);
-  void set_space(CompactibleFreeListSpace* space) { _space = space; }
-  size_t num_dirty_cards() { return _num_dirty_cards; }
-};
-
-// This closure is used in the non-product build to verify that
-// certain MemRegion iterations visit no regions at all: any call
-// to do_MemRegion fails a guarantee.
-class FalseMemRegionClosure: public MemRegionClosure {
-  void do_MemRegion(MemRegion mr) {
-    guarantee(!mr.is_empty(), "Shouldn't be empty");
-    guarantee(false, "Should never be here");
-  }
-};
-
-// This closure is used during the precleaning phase
-// to "carefully" rescan marked objects on dirty cards.
-// It uses MarkRefsIntoAndScanClosure declared in genOopClosures.hpp
-// to accomplish some of its work.
-class ScanMarkedObjectsAgainCarefullyClosure: public ObjectClosureCareful {
-  CMSCollector*                  _collector;
-  MemRegion                      _span;
-  bool                           _yield;
-  Mutex*                         _freelistLock;
-  CMSBitMap*                     _bitMap;
-  CMSMarkStack*                  _markStack;
-  MarkRefsIntoAndScanClosure*    _scanningClosure;
-  DEBUG_ONLY(HeapWord*           _last_scanned_object;)
-
- public:
-  ScanMarkedObjectsAgainCarefullyClosure(CMSCollector* collector,
-                                         MemRegion     span,
-                                         CMSBitMap* bitMap,
-                                         CMSMarkStack*  markStack,
-                                         MarkRefsIntoAndScanClosure* cl,
-                                         bool should_yield):
-    _collector(collector),
-    _span(span),
-    _yield(should_yield),
-    _bitMap(bitMap),
-    _markStack(markStack),
-    _scanningClosure(cl)
-    DEBUG_ONLY(COMMA _last_scanned_object(NULL))
-  { }
-
-  void do_object(oop p) {
-    guarantee(false, "call do_object_careful instead");
-  }
-
-  size_t      do_object_careful(oop p) {
-    guarantee(false, "Unexpected caller");
-    return 0;
-  }
-
-  size_t      do_object_careful_m(oop p, MemRegion mr);
-
-  void setFreelistLock(Mutex* m) {
-    _freelistLock = m;
-    _scanningClosure->set_freelistLock(m);
-  }
-
- private:
-  inline bool do_yield_check();
-
-  void do_yield_work();
-};
-
-class SurvivorSpacePrecleanClosure: public ObjectClosureCareful {
-  CMSCollector*                  _collector;
-  MemRegion                      _span;
-  bool                           _yield;
-  CMSBitMap*                     _bit_map;
-  CMSMarkStack*                  _mark_stack;
-  PushAndMarkClosure*            _scanning_closure;
-  unsigned int                   _before_count;
-
- public:
-  SurvivorSpacePrecleanClosure(CMSCollector* collector,
-                               MemRegion     span,
-                               CMSBitMap*    bit_map,
-                               CMSMarkStack* mark_stack,
-                               PushAndMarkClosure* cl,
-                               unsigned int  before_count,
-                               bool          should_yield):
-    _collector(collector),
-    _span(span),
-    _yield(should_yield),
-    _bit_map(bit_map),
-    _mark_stack(mark_stack),
-    _scanning_closure(cl),
-    _before_count(before_count)
-  { }
-
-  void do_object(oop p) {
-    guarantee(false, "call do_object_careful instead");
-  }
-
-  size_t      do_object_careful(oop p);
-
-  size_t      do_object_careful_m(oop p, MemRegion mr) {
-    guarantee(false, "Unexpected caller");
-    return 0;
-  }
-
- private:
-  inline void do_yield_check();
-  void do_yield_work();
-};
-
-// This closure is used to accomplish the sweeping work
-// after the second checkpoint but before the concurrent reset
-// phase.
-//
-// Terminology
-//   left hand chunk (LHC) - block of one or more chunks currently being
-//     coalesced.  The LHC is available for coalescing with a new chunk.
-//   right hand chunk (RHC) - the block currently being swept which, if
-//     free or garbage, can be coalesced with the LHC.
-// _inFreeRange is true if there is currently a LHC
-// _lastFreeRangeCoalesced is true if the LHC consists of more than one chunk.
-// _freeRangeInFreeLists is true if the LHC is in the free lists.
-// _freeFinger is the address of the current LHC
-class SweepClosure: public BlkClosureCareful {
-  CMSCollector*                  _collector;  // collector doing the work
-  ConcurrentMarkSweepGeneration* _g;    // Generation being swept
-  CompactibleFreeListSpace*      _sp;   // Space being swept
-  HeapWord*                      _limit;// the address at or above which the sweep should stop
-                                        // because we do not expect new garbage blocks
-                                        // eligible for sweeping to appear past that address.
-  Mutex*                         _freelistLock; // Free list lock (in space)
-  CMSBitMap*                     _bitMap;       // Marking bit map (in
-                                                // generation)
-  bool                           _inFreeRange;  // Indicates if we are in the
-                                                // midst of a free run
-  bool                           _freeRangeInFreeLists;
-                                        // Often, we have just found
-                                        // a free chunk and started
-                                        // a new free range; we do not
-                                        // eagerly remove this chunk from
-                                        // the free lists unless there is
-                                        // a possibility of coalescing.
-                                        // When true, this flag indicates
-                                        // that the _freeFinger below
-                                        // points to a potentially free chunk
-                                        // that may still be in the free lists
-  bool                           _lastFreeRangeCoalesced;
-                                        // free range contains chunks
-                                        // coalesced
-  bool                           _yield;
-                                        // Whether sweeping should be
-                                        // done with yields. For instance
-                                        // when done by the foreground
-                                        // collector we shouldn't yield.
-  HeapWord*                      _freeFinger;   // When _inFreeRange is set, the
-                                                // pointer to the "left hand
-                                                // chunk"
-  size_t                         _freeRangeSize;
-                                        // When _inFreeRange is set, this
-                                        // indicates the accumulated size
-                                        // of the "left hand chunk"
-  NOT_PRODUCT(
-    size_t                       _numObjectsFreed;
-    size_t                       _numWordsFreed;
-    size_t                       _numObjectsLive;
-    size_t                       _numWordsLive;
-    size_t                       _numObjectsAlreadyFree;
-    size_t                       _numWordsAlreadyFree;
-    FreeChunk*                   _last_fc;
-  )
- private:
-  // Code common to processing a free chunk or a garbage chunk
-  // encountered during sweeping.
-  void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
-  // Process a free chunk during sweeping.
-  void do_already_free_chunk(FreeChunk *fc);
-  // Work method called when processing an already free or a
-  // freshly garbage chunk to do a lookahead and possibly a
-  // preemptive flush if crossing over _limit.
-  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
-  // Process a garbage chunk during sweeping.
-  size_t do_garbage_chunk(FreeChunk *fc);
-  // Process a live chunk during sweeping.
-  size_t do_live_chunk(FreeChunk* fc);
-
-  // Accessors.
-  HeapWord* freeFinger() const          { return _freeFinger; }
-  void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
-  bool inFreeRange()    const           { return _inFreeRange; }
-  void set_inFreeRange(bool v)          { _inFreeRange = v; }
-  bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
-  void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
-  bool freeRangeInFreeLists() const     { return _freeRangeInFreeLists; }
-  void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
-
-  // Initialize a free range.
-  void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
-  // Return this chunk to the free lists.
-  void flush_cur_free_chunk(HeapWord* chunk, size_t size);
-
-  // Check if we should yield and do so when necessary.
-  inline void do_yield_check(HeapWord* addr);
-
-  // Yield
-  void do_yield_work(HeapWord* addr);
-
-  // Debugging/Printing
-  void print_free_block_coalesced(FreeChunk* fc) const;
-
- public:
-  SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
-               CMSBitMap* bitMap, bool should_yield);
-  ~SweepClosure() PRODUCT_RETURN;
-
-  size_t       do_blk_careful(HeapWord* addr);
-  void         print() const { print_on(tty); }
-  void         print_on(outputStream *st) const;
-};
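The LHC/RHC terminology above boils down to a small state machine: consecutive free or garbage blocks grow the current left hand chunk, and a live block flushes it. A self-contained sketch under that reading (illustrative names and types, not HotSpot's):

#include <cstddef>
#include <cstdio>
#include <vector>

struct Block { bool free; size_t size; };

int main() {
  std::vector<Block> blocks = {{true, 2}, {true, 3}, {false, 5}, {true, 4}};
  bool   in_free_range = false;   // plays the role of _inFreeRange
  size_t finger = 0, range = 0;   // play the roles of _freeFinger, _freeRangeSize
  size_t addr = 0;
  for (const Block& b : blocks) {
    if (b.free) {
      if (!in_free_range) { in_free_range = true; finger = addr; range = 0; }
      range += b.size;            // coalesce into the LHC
    } else if (in_free_range) {
      std::printf("free chunk at %zu, size %zu\n", finger, range);
      in_free_range = false;      // a live block flushes the coalesced LHC
    }
    addr += b.size;
  }
  if (in_free_range) std::printf("free chunk at %zu, size %zu\n", finger, range);
  return 0;  // prints "free chunk at 0, size 5" then "free chunk at 10, size 4"
}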
-
-// Closures related to weak references processing
-
-// During CMS' weak reference processing, this is a
-// work-routine/closure used to complete transitive
-// marking of objects as live after a certain point
-// in which an initial set has been completely accumulated.
-// This closure is currently used both during the final
-// remark stop-world phase, as well as during the concurrent
-// precleaning of the discovered reference lists.
-class CMSDrainMarkingStackClosure: public VoidClosure {
-  CMSCollector*        _collector;
-  MemRegion            _span;
-  CMSMarkStack*        _mark_stack;
-  CMSBitMap*           _bit_map;
-  CMSKeepAliveClosure* _keep_alive;
-  bool                 _concurrent_precleaning;
- public:
-  CMSDrainMarkingStackClosure(CMSCollector* collector, MemRegion span,
-                      CMSBitMap* bit_map, CMSMarkStack* mark_stack,
-                      CMSKeepAliveClosure* keep_alive,
-                      bool cpc):
-    _collector(collector),
-    _span(span),
-    _mark_stack(mark_stack),
-    _bit_map(bit_map),
-    _keep_alive(keep_alive),
-    _concurrent_precleaning(cpc) {
-    assert(_concurrent_precleaning == _keep_alive->concurrent_precleaning(),
-           "Mismatch");
-  }
-
-  void do_void();
-};
-
-// A parallel version of CMSDrainMarkingStackClosure above.
-class CMSParDrainMarkingStackClosure: public VoidClosure {
-  CMSCollector*           _collector;
-  MemRegion               _span;
-  OopTaskQueue*           _work_queue;
-  CMSBitMap*              _bit_map;
-  CMSInnerParMarkAndPushClosure _mark_and_push;
-
- public:
-  CMSParDrainMarkingStackClosure(CMSCollector* collector,
-                                 MemRegion span, CMSBitMap* bit_map,
-                                 OopTaskQueue* work_queue):
-    _collector(collector),
-    _span(span),
-    _work_queue(work_queue),
-    _bit_map(bit_map),
-    _mark_and_push(collector, span, bit_map, work_queue) { }
-
- public:
-  void trim_queue(uint max);
-  void do_void();
-};
-
-// Allow yielding or short-circuiting of reference list
-// precleaning work.
-class CMSPrecleanRefsYieldClosure: public YieldClosure {
-  CMSCollector* _collector;
-  void do_yield_work();
- public:
-  CMSPrecleanRefsYieldClosure(CMSCollector* collector):
-    _collector(collector) {}
-  virtual bool should_return();
-};
-
-
-// Convenience class that locks free list locks for given CMS collector
-class FreelistLocker: public StackObj {
- private:
-  CMSCollector* _collector;
- public:
-  FreelistLocker(CMSCollector* collector):
-    _collector(collector) {
-    _collector->getFreelistLocks();
-  }
-
-  ~FreelistLocker() {
-    _collector->releaseFreelistLocks();
-  }
-};
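FreelistLocker is a classic RAII guard: the locks are taken in the constructor and released in the destructor, so every exit path unlocks. A hedged usage sketch (the surrounding function is hypothetical; the types are the ones declared above):

void rebuild_free_lists(CMSCollector* collector) {
  FreelistLocker fl(collector);  // getFreelistLocks() runs here
  // ... all free-list manipulation happens while the locks are held ...
}                                // releaseFreelistLocks() runs here, even on early return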
-
-// Mark all dead objects in a given space.
-class MarkDeadObjectsClosure: public BlkClosure {
-  const CMSCollector*             _collector;
-  const CompactibleFreeListSpace* _sp;
-  CMSBitMap*                      _live_bit_map;
-  CMSBitMap*                      _dead_bit_map;
-public:
-  MarkDeadObjectsClosure(const CMSCollector* collector,
-                         const CompactibleFreeListSpace* sp,
-                         CMSBitMap *live_bit_map,
-                         CMSBitMap *dead_bit_map) :
-    _collector(collector),
-    _sp(sp),
-    _live_bit_map(live_bit_map),
-    _dead_bit_map(dead_bit_map) {}
-  size_t do_blk(HeapWord* addr);
-};
-
-class TraceCMSMemoryManagerStats : public TraceMemoryManagerStats {
-
- public:
-  TraceCMSMemoryManagerStats(CMSCollector::CollectorState phase, GCCause::Cause cause);
-};
-
-
-#endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,472 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
-#define SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/cmsLockVerifier.hpp"
-#include "gc/cms/compactibleFreeListSpace.inline.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/gcUtil.hpp"
-#include "utilities/align.hpp"
-#include "utilities/bitMap.inline.hpp"
-
-inline void CMSBitMap::clear_all() {
-  assert_locked();
-  // CMS bitmaps usually cover large memory regions.
-  _bm.clear_large();
-}
-
-inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
-  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
-}
-
-inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
-  return _bmStartWord + (offset << _shifter);
-}
-
-inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
-  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
-  return diff >> _shifter;
-}
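The three helpers above are pure address arithmetic: an address maps to a bit offset by subtracting the base and shifting right, and back again by shifting left and adding the base. A self-contained sketch, assuming addresses measured in heap-word units and a shifter of 0 (one bit per word); the constants are invented:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const uintptr_t bm_start = 0x100000;  // stand-in for _bmStartWord
  const unsigned  shifter  = 0;         // stand-in for _shifter

  uintptr_t addr   = bm_start + 17;                   // 17 words into the space
  size_t    offset = (addr - bm_start) >> shifter;    // heapWordToOffset
  uintptr_t back   = bm_start + (offset << shifter);  // offsetToHeapWord
  assert(offset == 17 && back == addr);               // the mapping round-trips
  return 0;
}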
-
-inline void CMSBitMap::mark(HeapWord* addr) {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  _bm.set_bit(heapWordToOffset(addr));
-}
-
-inline bool CMSBitMap::par_mark(HeapWord* addr) {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return _bm.par_at_put(heapWordToOffset(addr), true);
-}
-
-inline void CMSBitMap::par_clear(HeapWord* addr) {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  _bm.par_at_put(heapWordToOffset(addr), false);
-}
-
-inline void CMSBitMap::mark_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                BitMap::small_range);
-}
-
-inline void CMSBitMap::clear_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                  BitMap::small_range);
-}
-
-inline void CMSBitMap::par_mark_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                    BitMap::small_range);
-}
-
-inline void CMSBitMap::par_clear_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size is usually just 1 bit.
-  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                      BitMap::small_range);
-}
-
-inline void CMSBitMap::mark_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                BitMap::large_range);
-}
-
-inline void CMSBitMap::clear_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                  BitMap::large_range);
-}
-
-inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                    BitMap::large_range);
-}
-
-inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
-  NOT_PRODUCT(region_invariant(mr));
-  // Range size must be greater than 32 bytes.
-  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
-                      BitMap::large_range);
-}
-
-// Starting at "addr" (inclusive) return a memory region
-// corresponding to the first maximally contiguous marked ("1") region.
-inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
-  return getAndClearMarkedRegion(addr, endWord());
-}
-
-// Starting at "start_addr" (inclusive) return a memory region
-// corresponding to the first maximal contiguous marked ("1") region
-// strictly less than end_addr.
-inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
-                                                    HeapWord* end_addr) {
-  HeapWord *start, *end;
-  assert_locked();
-  start = getNextMarkedWordAddress  (start_addr, end_addr);
-  end   = getNextUnmarkedWordAddress(start,      end_addr);
-  assert(start <= end, "Consistency check");
-  MemRegion mr(start, end);
-  if (!mr.is_empty()) {
-    clear_range(mr);
-  }
-  return mr;
-}
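getAndClearMarkedRegion is essentially "find the first run of 1 bits at or after start, clear it, and return its bounds". A self-contained sketch of that operation over a plain bit vector (not the HotSpot BitMap API):

#include <cstdio>
#include <utility>
#include <vector>

// Returns [start, end) of the first run of set bits at or after 'from',
// clearing the run as a side effect; start == end means no run was found.
static std::pair<size_t, size_t>
get_and_clear_run(std::vector<bool>& bm, size_t from) {
  size_t n = bm.size(), start = from;
  while (start < n && !bm[start]) start++;        // next marked bit
  size_t end = start;
  while (end < n && bm[end]) bm[end++] = false;   // next unmarked bit, clearing as we go
  return {start, end};
}

int main() {
  std::vector<bool> bm = {0, 1, 1, 1, 0, 1};
  std::pair<size_t, size_t> r = get_and_clear_run(bm, 0);
  std::printf("[%zu, %zu)\n", r.first, r.second);  // prints "[1, 4)"
  return 0;
}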
-
-inline bool CMSBitMap::isMarked(HeapWord* addr) const {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return _bm.at(heapWordToOffset(addr));
-}
-
-// The same as isMarked() but without a lock check.
-inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return _bm.at(heapWordToOffset(addr));
-}
-
-
-inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
-  assert_locked();
-  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
-         "outside underlying space?");
-  return !_bm.at(heapWordToOffset(addr));
-}
-
-// Return the HeapWord address corresponding to the next "1" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
-  return getNextMarkedWordAddress(addr, endWord());
-}
-
-// Return the least HeapWord address corresponding to next "1" bit
-// starting at start_addr (inclusive) but strictly less than end_addr.
-inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
-  HeapWord* start_addr, HeapWord* end_addr) const {
-  assert_locked();
-  size_t nextOffset = _bm.get_next_one_offset(
-                        heapWordToOffset(start_addr),
-                        heapWordToOffset(end_addr));
-  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
-  assert(nextAddr >= start_addr &&
-         nextAddr <= end_addr, "get_next_one postcondition");
-  assert((nextAddr == end_addr) ||
-         isMarked(nextAddr), "get_next_one postcondition");
-  return nextAddr;
-}
-
-
-// Return the HeapWord address corresponding to the next "0" bit
-// (inclusive).
-inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
-  return getNextUnmarkedWordAddress(addr, endWord());
-}
-
-// Return the least HeapWord address corresponding to the next "0" bit,
-// starting at start_addr (inclusive) but strictly less than end_addr.
-inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
-  HeapWord* start_addr, HeapWord* end_addr) const {
-  assert_locked();
-  size_t nextOffset = _bm.get_next_zero_offset(
-                        heapWordToOffset(start_addr),
-                        heapWordToOffset(end_addr));
-  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
-  assert(nextAddr >= start_addr &&
-         nextAddr <= end_addr, "get_next_zero postcondition");
-  assert((nextAddr == end_addr) ||
-          isUnmarked(nextAddr), "get_next_zero postcondition");
-  return nextAddr;
-}
-
-inline bool CMSBitMap::isAllClear() const {
-  assert_locked();
-  return getNextMarkedWordAddress(startWord()) >= endWord();
-}
-
-inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
-                            HeapWord* right) {
-  assert_locked();
-  left = MAX2(_bmStartWord, left);
-  right = MIN2(_bmStartWord + _bmWordSize, right);
-  if (right > left) {
-    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
-  }
-}
-
-inline void CMSCollector::save_sweep_limits() {
-  _cmsGen->save_sweep_limit();
-}
-
-inline bool CMSCollector::is_dead_obj(oop obj) const {
-  HeapWord* addr = (HeapWord*)obj;
-  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
-          && _cmsGen->cmsSpace()->block_is_obj(addr)),
-         "must be object");
-  return  should_unload_classes() &&
-          _collectorState == Sweeping &&
-         !_markBitMap.isMarked(addr);
-}
-
-inline bool CMSCollector::should_abort_preclean() const {
-  // We are in the midst of an "abortable preclean" and either
-  // scavenge is done or foreground GC wants to take over collection
-  return _collectorState == AbortablePreclean &&
-         (_abort_preclean || _foregroundGCIsActive ||
-          CMSHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
-}
-
-inline size_t CMSCollector::get_eden_used() const {
-  return _young_gen->eden()->used();
-}
-
-inline size_t CMSCollector::get_eden_capacity() const {
-  return _young_gen->eden()->capacity();
-}
-
-inline bool CMSStats::valid() const {
-  return _valid_bits == _ALL_VALID;
-}
-
-inline void CMSStats::record_gc0_begin() {
-  if (_gc0_begin_time.is_updated()) {
-    float last_gc0_period = _gc0_begin_time.seconds();
-    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
-      last_gc0_period, _gc0_alpha);
-    _gc0_alpha = _saved_alpha;
-    _valid_bits |= _GC0_VALID;
-  }
-  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();
-
-  _gc0_begin_time.update();
-}
-
-inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
-  float last_gc0_duration = _gc0_begin_time.seconds();
-  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
-    last_gc0_duration, _gc0_alpha);
-
-  // Amount promoted.
-  _cms_used_at_gc0_end = cms_gen_bytes_used;
-
-  size_t promoted_bytes = 0;
-  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
-    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
-  }
-
-  // If the young gen collection was skipped, then the
-  // number of promoted bytes will be 0 and adding it to the
-  // average will incorrectly lessen the average.  It is, however,
-  // also possible that no promotion was needed.
-  //
-  // _gc0_promoted used to be calculated as
-  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
-  //  promoted_bytes, _gc0_alpha);
-  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
-  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();
-
-  // Amount directly allocated.
-  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
-  _cms_gen->reset_direct_allocated_words();
-  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
-    allocated_bytes, _gc0_alpha);
-}
-
-inline void CMSStats::record_cms_begin() {
-  _cms_timer.stop();
-
-  // This is just an approximate value, but is good enough.
-  _cms_used_at_cms_begin = _cms_used_at_gc0_end;
-
-  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
-    (float) _cms_timer.seconds(), _cms_alpha);
-  _cms_begin_time.update();
-
-  _cms_timer.reset();
-  _cms_timer.start();
-}
-
-inline void CMSStats::record_cms_end() {
-  _cms_timer.stop();
-
-  float cur_duration = _cms_timer.seconds();
-  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
-    cur_duration, _cms_alpha);
-
-  _cms_end_time.update();
-  _cms_alpha = _saved_alpha;
-  _allow_duty_cycle_reduction = true;
-  _valid_bits |= _CMS_VALID;
-
-  _cms_timer.start();
-}
-
-inline double CMSStats::cms_time_since_begin() const {
-  return _cms_begin_time.seconds();
-}
-
-inline double CMSStats::cms_time_since_end() const {
-  return _cms_end_time.seconds();
-}
-
-inline double CMSStats::promotion_rate() const {
-  assert(valid(), "statistics not valid yet");
-  return gc0_promoted() / gc0_period();
-}
-
-inline double CMSStats::cms_allocation_rate() const {
-  assert(valid(), "statistics not valid yet");
-  return cms_allocated() / gc0_period();
-}
-
-inline double CMSStats::cms_consumption_rate() const {
-  assert(valid(), "statistics not valid yet");
-  return (gc0_promoted() + cms_allocated()) / gc0_period();
-}
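The CMSStats recorders above all funnel through AdaptiveWeightedAverage::exp_avg, an exponentially weighted moving average. A hedged standalone sketch of that computation, assuming the weight is the percentage given to the new sample:

#include <cassert>
#include <cstdio>

static float exp_avg(float avg, float sample, unsigned weight) {
  assert(weight <= 100);  // weight is a percent
  return (100.0f - weight) * avg / 100.0f + weight * sample / 100.0f;
}

int main() {
  float period = 10.0f;                 // current average GC period (seconds)
  period = exp_avg(period, 14.0f, 25);  // new observation of 14 s, alpha = 25
  std::printf("%.2f\n", period);        // prints "11.00"
  return 0;
}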
-
-inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
-  cmsSpace()->save_sweep_limit();
-}
-
-inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
-  return _cmsSpace->used_region_at_save_marks();
-}
-
-template <typename OopClosureType>
-void ConcurrentMarkSweepGeneration::oop_since_save_marks_iterate(OopClosureType* cl) {
-  cl->set_generation(this);
-  cmsSpace()->oop_since_save_marks_iterate(cl);
-  cl->reset_generation();
-  save_marks();
-}
-
-inline void MarkFromRootsClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    do_yield_work();
-  }
-}
-
-inline void ParMarkFromRootsClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive()) {
-    do_yield_work();
-  }
-}
-
-inline void PushOrMarkClosure::do_yield_check() {
-  _parent->do_yield_check();
-}
-
-inline void ParPushOrMarkClosure::do_yield_check() {
-  _parent->do_yield_check();
-}
-
-// Return value of "true" indicates that the on-going preclean
-// should be aborted.
-inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    // Sample young gen size before and after yield
-    _collector->sample_eden();
-    do_yield_work();
-    _collector->sample_eden();
-    return _collector->should_abort_preclean();
-  }
-  return false;
-}
-
-inline void SurvivorSpacePrecleanClosure::do_yield_check() {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    // Sample young gen size before and after yield
-    _collector->sample_eden();
-    do_yield_work();
-    _collector->sample_eden();
-  }
-}
-
-inline void SweepClosure::do_yield_check(HeapWord* addr) {
-  if (ConcurrentMarkSweepThread::should_yield() &&
-      !_collector->foregroundGCIsActive() &&
-      _yield) {
-    do_yield_work(addr);
-  }
-}
-
-inline void MarkRefsIntoAndScanClosure::do_yield_check() {
-  // The conditions are ordered for the remarking phase
-  // when _yield is false.
-  if (_yield &&
-      !_collector->foregroundGCIsActive() &&
-      ConcurrentMarkSweepThread::should_yield()) {
-    do_yield_work();
-  }
-}
-
-
-inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
-  // Align the end of mr so it's at a card boundary.
-  // This is superfluous except at the end of the space;
-  // we should do better than this XXX
-  MemRegion mr2(mr.start(), align_up(mr.end(),
-                CardTable::card_size /* bytes */));
-  _t->mark_range(mr2);
-}
-
-inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
-  // Align the end of mr so it's at a card boundary.
-  // This is superfluous except at the end of the space;
-  // we should do better than this XXX
-  MemRegion mr2(mr.start(), align_up(mr.end(),
-                CardTable::card_size /* bytes */));
-  _t->par_mark_range(mr2);
-}
-
-#endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,311 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/shared/gcId.hpp"
-#include "memory/universe.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/init.hpp"
-#include "runtime/java.hpp"
-#include "runtime/javaCalls.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/os.hpp"
-#include "runtime/vmThread.hpp"
-
-// ======= Concurrent Mark Sweep Thread ========
-
-ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::_cmst = NULL;
-CMSCollector* ConcurrentMarkSweepThread::_collector         = NULL;
-int  ConcurrentMarkSweepThread::_CMS_flag                   = CMS_nil;
-
-volatile jint ConcurrentMarkSweepThread::_pending_yields    = 0;
-
-ConcurrentMarkSweepThread::ConcurrentMarkSweepThread(CMSCollector* collector)
-  : ConcurrentGCThread() {
-  assert(UseConcMarkSweepGC,  "UseConcMarkSweepGC should be set");
-  assert(_cmst == NULL, "CMS thread already created");
-  _cmst = this;
-  assert(_collector == NULL, "Collector already set");
-  _collector = collector;
-
-  set_name("CMS Main Thread");
-
-  // An old comment here said: "Priority should be just less
-  // than that of VMThread".  Since the VMThread runs at
-  // NearMaxPriority, the old comment was inaccurate, but
-  // changing the default priority to NearMaxPriority-1
-  // could change current behavior, so the default of
-  // NearMaxPriority stays in place.
-  //
-  // Note that there's a possibility of the VMThread
-  // starving if UseCriticalCMSThreadPriority is on.
-  // That won't happen on Solaris for various reasons,
-  // but may well happen on non-Solaris platforms.
-  create_and_start(UseCriticalCMSThreadPriority ? CriticalPriority : NearMaxPriority);
-}
-
-void ConcurrentMarkSweepThread::run_service() {
-  assert(this == cmst(), "just checking");
-
-  if (BindCMSThreadToCPU && !os::bind_to_processor(CPUForCMSThread)) {
-    log_warning(gc)("Couldn't bind CMS thread to processor " UINTX_FORMAT, CPUForCMSThread);
-  }
-
-  while (!should_terminate()) {
-    sleepBeforeNextCycle();
-    if (should_terminate()) break;
-    GCIdMark gc_id_mark;
-    GCCause::Cause cause = _collector->_full_gc_requested ?
-      _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
-    _collector->collect_in_background(cause);
-  }
-
-  // Check that the state of any protocol for synchronization
-  // between background (CMS) and foreground collector is "clean"
-  // (i.e. will not potentially block the foreground collector,
-  // requiring action by us).
-  verify_ok_to_terminate();
-}
-
-#ifndef PRODUCT
-void ConcurrentMarkSweepThread::verify_ok_to_terminate() const {
-  assert(!(CGC_lock->owned_by_self() || cms_thread_has_cms_token() ||
-           cms_thread_wants_cms_token()),
-         "Must renounce all worldly possessions and desires for nirvana");
-  _collector->verify_ok_to_terminate();
-}
-#endif
-
-// Create and start a new ConcurrentMarkSweepThread for the given CMS collector.
-ConcurrentMarkSweepThread* ConcurrentMarkSweepThread::start(CMSCollector* collector) {
-  guarantee(_cmst == NULL, "start() called twice!");
-  ConcurrentMarkSweepThread* th = new ConcurrentMarkSweepThread(collector);
-  assert(_cmst == th, "Where did the just-created CMS thread go?");
-  return th;
-}
-
-void ConcurrentMarkSweepThread::stop_service() {
-  // Now post a notify on CGC_lock so as to nudge
-  // CMS thread(s) that might be slumbering in
-  // sleepBeforeNextCycle.
-  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  CGC_lock->notify_all();
-}
-
-void ConcurrentMarkSweepThread::threads_do(ThreadClosure* tc) {
-  assert(tc != NULL, "Null ThreadClosure");
-  if (cmst() != NULL && !cmst()->has_terminated()) {
-    tc->do_thread(cmst());
-  }
-  assert(Universe::is_fully_initialized(),
-         "Called too early, make sure heap is fully initialized");
-  if (_collector != NULL) {
-    AbstractWorkGang* gang = _collector->conc_workers();
-    if (gang != NULL) {
-      gang->threads_do(tc);
-    }
-  }
-}
-
-void ConcurrentMarkSweepThread::print_all_on(outputStream* st) {
-  if (cmst() != NULL && !cmst()->has_terminated()) {
-    cmst()->print_on(st);
-    st->cr();
-  }
-  if (_collector != NULL) {
-    AbstractWorkGang* gang = _collector->conc_workers();
-    if (gang != NULL) {
-      gang->print_worker_threads_on(st);
-    }
-  }
-}
-
-void ConcurrentMarkSweepThread::synchronize(bool is_cms_thread) {
-  assert(UseConcMarkSweepGC, "just checking");
-
-  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  if (!is_cms_thread) {
-    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
-    CMSSynchronousYieldRequest yr;
-    while (CMS_flag_is_set(CMS_cms_has_token)) {
-      // indicate that we want to get the token
-      set_CMS_flag(CMS_vm_wants_token);
-      CGC_lock->wait_without_safepoint_check();
-    }
-    // claim the token and proceed
-    clear_CMS_flag(CMS_vm_wants_token);
-    set_CMS_flag(CMS_vm_has_token);
-  } else {
-    assert(Thread::current()->is_ConcurrentGC_thread(),
-           "Not a CMS thread");
-    // The following barrier assumes there's only one CMS thread.
-    // This will need to be modified if there is more than one CMS thread.
-    while (CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token)) {
-      set_CMS_flag(CMS_cms_wants_token);
-      CGC_lock->wait_without_safepoint_check();
-    }
-    // claim the token
-    clear_CMS_flag(CMS_cms_wants_token);
-    set_CMS_flag(CMS_cms_has_token);
-  }
-}
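The synchronize/desynchronize pair implements a two-party token handoff in which the VM thread has priority: the CMS side backs off even for a pending VM request, while the VM side only waits out an actual CMS holder. A standalone sketch using std::mutex and std::condition_variable (not HotSpot's Monitor API; the names are invented):

#include <condition_variable>
#include <mutex>

enum { VM_WANTS = 1, VM_HAS = 2, CMS_WANTS = 4, CMS_HAS = 8 };

static std::mutex lk;
static std::condition_variable cv;
static int flags = 0;

void acquire_token(bool is_cms) {
  std::unique_lock<std::mutex> x(lk);
  if (!is_cms) {
    flags |= VM_WANTS;                                   // announce the request
    cv.wait(x, [] { return (flags & CMS_HAS) == 0; });   // wait out the CMS holder
    flags = (flags & ~VM_WANTS) | VM_HAS;                // claim the token
  } else {
    flags |= CMS_WANTS;
    cv.wait(x, [] { return (flags & (VM_HAS | VM_WANTS)) == 0; });  // yield to the VM side
    flags = (flags & ~CMS_WANTS) | CMS_HAS;
  }
}

void release_token(bool is_cms) {
  std::lock_guard<std::mutex> x(lk);
  flags &= ~(is_cms ? CMS_HAS : VM_HAS);
  cv.notify_all();                                       // wake the waiting side
}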
-
-void ConcurrentMarkSweepThread::desynchronize(bool is_cms_thread) {
-  assert(UseConcMarkSweepGC, "just checking");
-
-  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  if (!is_cms_thread) {
-    assert(Thread::current()->is_VM_thread(), "Not a VM thread");
-    assert(CMS_flag_is_set(CMS_vm_has_token), "just checking");
-    clear_CMS_flag(CMS_vm_has_token);
-    if (CMS_flag_is_set(CMS_cms_wants_token)) {
-      // wake-up a waiting CMS thread
-      CGC_lock->notify();
-    }
-    assert(!CMS_flag_is_set(CMS_vm_has_token | CMS_vm_wants_token),
-           "Should have been cleared");
-  } else {
-    assert(Thread::current()->is_ConcurrentGC_thread(),
-           "Not a CMS thread");
-    assert(CMS_flag_is_set(CMS_cms_has_token), "just checking");
-    clear_CMS_flag(CMS_cms_has_token);
-    if (CMS_flag_is_set(CMS_vm_wants_token)) {
-      // wake-up a waiting VM thread
-      CGC_lock->notify();
-    }
-    assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
-           "Should have been cleared");
-  }
-}
-
-// Wait until any cms_lock event
-void ConcurrentMarkSweepThread::wait_on_cms_lock(long t_millis) {
-  MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-  if (should_terminate() || _collector->_full_gc_requested) {
-    return;
-  }
-  set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
-  CGC_lock->wait_without_safepoint_check(t_millis);
-  clear_CMS_flag(CMS_cms_wants_token);
-  assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
-         "Should not be set");
-}
-
-// Wait until the next synchronous GC, a concurrent full gc request,
-// or a timeout, whichever is earlier.
-void ConcurrentMarkSweepThread::wait_on_cms_lock_for_scavenge(long t_millis) {
-  // Wait time in millis; a value of 0 means an unbounded wait for a scavenge.
-  assert(t_millis >= 0, "Wait time for scavenge should be 0 or positive");
-
-  CMSHeap* heap = CMSHeap::heap();
-  double start_time_secs = os::elapsedTime();
-  double end_time_secs = start_time_secs + (t_millis / ((double) MILLIUNITS));
-
-  // Total collections count before waiting loop
-  unsigned int before_count;
-  {
-    MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-    before_count = heap->total_collections();
-  }
-
-  unsigned int loop_count = 0;
-
-  while (!should_terminate()) {
-    double now_time = os::elapsedTime();
-    long wait_time_millis;
-
-    if (t_millis != 0) {
-      // Recompute the remaining wait limit
-      wait_time_millis = (long) ((end_time_secs - now_time) * MILLIUNITS);
-      if (wait_time_millis <= 0) {
-        // Wait time is over
-        break;
-      }
-    } else {
-      // No wait limit; wait forever if necessary
-      wait_time_millis = 0;
-    }
-
-    // Wait until the next event or the remaining timeout
-    {
-      MutexLocker x(CGC_lock, Mutex::_no_safepoint_check_flag);
-
-      if (should_terminate() || _collector->_full_gc_requested) {
-        return;
-      }
-      set_CMS_flag(CMS_cms_wants_token);   // to provoke notifies
-      assert(t_millis == 0 || wait_time_millis > 0, "Sanity");
-      CGC_lock->wait_without_safepoint_check(wait_time_millis);
-      clear_CMS_flag(CMS_cms_wants_token);
-      assert(!CMS_flag_is_set(CMS_cms_has_token | CMS_cms_wants_token),
-             "Should not be set");
-    }
-
-    // Extra wait-time check before taking the heap lock to get the collection count
-    if (t_millis != 0 && os::elapsedTime() >= end_time_secs) {
-      // Wait time is over
-      break;
-    }
-
-    // Total collections count after the event
-    unsigned int after_count;
-    {
-      MutexLocker hl(Heap_lock, Mutex::_no_safepoint_check_flag);
-      after_count = heap->total_collections();
-    }
-
-    if (before_count != after_count) {
-      // There was a collection - success
-      break;
-    }
-
-    // Excessive-looping warning (fires when the loop counter wraps around)
-    if (++loop_count == 0) {
-      log_warning(gc)("wait_on_cms_lock_for_scavenge() has looped %u times", loop_count - 1);
-    }
-  }
-}
-
-void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
-  while (!should_terminate()) {
-    if (CMSWaitDuration >= 0) {
-      // Wait until the next synchronous GC, a concurrent full gc
-      // request or a timeout, whichever is earlier.
-      wait_on_cms_lock_for_scavenge(CMSWaitDuration);
-    } else {
-      // Wait for any cms_lock event, but no longer than CMSCheckInterval,
-      // so that shouldConcurrentCollect is not polled continuously.
-      wait_on_cms_lock(CMSCheckInterval);
-    }
-    // Check if we should start a CMS collection cycle
-    if (_collector->shouldConcurrentCollect()) {
-      return;
-    }
-    // .. collection criterion not yet met, let's go back
-    // and wait some more
-  }
-}
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,169 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_CONCURRENTMARKSWEEPTHREAD_HPP
-#define SHARE_GC_CMS_CONCURRENTMARKSWEEPTHREAD_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/concurrentGCThread.hpp"
-#include "runtime/thread.hpp"
-
-class ConcurrentMarkSweepGeneration;
-class CMSCollector;
-
-// The Concurrent Mark Sweep GC Thread
-class ConcurrentMarkSweepThread: public ConcurrentGCThread {
-  friend class VMStructs;
-  friend class ConcurrentMarkSweepGeneration;   // XXX should remove friendship
-  friend class CMSCollector;
-
- private:
-  static ConcurrentMarkSweepThread* _cmst;
-  static CMSCollector*              _collector;
-
-  enum CMS_flag_type {
-    CMS_nil             = NoBits,
-    CMS_cms_wants_token = nth_bit(0),
-    CMS_cms_has_token   = nth_bit(1),
-    CMS_vm_wants_token  = nth_bit(2),
-    CMS_vm_has_token    = nth_bit(3)
-  };
-
-  static int _CMS_flag;
-
-  static bool CMS_flag_is_set(int b)        { return (_CMS_flag & b) != 0;   }
-  static bool set_CMS_flag(int b)           { return (_CMS_flag |= b) != 0;  }
-  static bool clear_CMS_flag(int b)         { return (_CMS_flag &= ~b) != 0; }
-  void sleepBeforeNextCycle();
-
-  // CMS thread should yield for a young gen collection and direct allocations
-  static char _pad_1[64 - sizeof(jint)];    // prevent cache-line sharing
-  static volatile jint _pending_yields;
-  static char _pad_2[64 - sizeof(jint)];    // prevent cache-line sharing
-
-  // debugging
-  void verify_ok_to_terminate() const PRODUCT_RETURN;
-
-  void run_service();
-  void stop_service();
-
- public:
-  // Constructor
-  ConcurrentMarkSweepThread(CMSCollector* collector);
-
-  static void threads_do(ThreadClosure* tc);
-
-  // Printing
-  static void print_all_on(outputStream* st);
-  static void print_all()                             { print_all_on(tty); }
-
-  // Returns the CMS Thread
-  static ConcurrentMarkSweepThread* cmst()    { return _cmst; }
-  static CMSCollector*         collector()    { return _collector;  }
-
-  // Create and start the CMS Thread, or stop it on shutdown
-  static ConcurrentMarkSweepThread* start(CMSCollector* collector);
-
-  // Synchronization using CMS token
-  static void synchronize(bool is_cms_thread);
-  static void desynchronize(bool is_cms_thread);
-  static bool vm_thread_has_cms_token() {
-    return CMS_flag_is_set(CMS_vm_has_token);
-  }
-  static bool cms_thread_has_cms_token() {
-    return CMS_flag_is_set(CMS_cms_has_token);
-  }
-  static bool vm_thread_wants_cms_token() {
-    return CMS_flag_is_set(CMS_vm_wants_token);
-  }
-  static bool cms_thread_wants_cms_token() {
-    return CMS_flag_is_set(CMS_cms_wants_token);
-  }
-
-  // Wait on CMS lock until the next synchronous GC
-  // or given timeout, whichever is earlier. A timeout value
-  // of 0 indicates that there is no upper bound on the wait time.
-  // A concurrent full gc request terminates the wait.
-  void wait_on_cms_lock(long t_millis);
-
-  // Wait on CMS lock until a scavenge is observed, the next synchronous
-  // GC, or the given timeout, whichever is earlier. A timeout value of 0
-  // indicates that there is no upper bound on the wait time.
-  // A concurrent full gc request terminates the wait.
-  void wait_on_cms_lock_for_scavenge(long t_millis);
-
-  // The CMS thread will yield during the work portion of its cycle
-  // only when requested to.
-  // A synchronous request is used for young gen collections and
-  // for direct allocations.  The requesting thread increments
-  // _pending_yields at the beginning of an operation, and decrements
-  // _pending_yields when that operation is completed.
-  // In turn, the CMS thread yields when _pending_yields is positive,
-  // and continues to yield until the value reverts to 0.
-
-  static void increment_pending_yields()   {
-    Atomic::inc(&_pending_yields);
-    assert(_pending_yields >= 0, "can't be negative");
-  }
-  static void decrement_pending_yields()   {
-    Atomic::dec(&_pending_yields);
-    assert(_pending_yields >= 0, "can't be negative");
-  }
-  static bool should_yield()   { return _pending_yields > 0; }
-};
-
-// For scoped increment/decrement of (synchronous) yield requests
-class CMSSynchronousYieldRequest: public StackObj {
- public:
-  CMSSynchronousYieldRequest() {
-    ConcurrentMarkSweepThread::increment_pending_yields();
-  }
-  ~CMSSynchronousYieldRequest() {
-    ConcurrentMarkSweepThread::decrement_pending_yields();
-  }
-};
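Such scoped requests make the yield bookkeeping exception- and early-return-safe. A hedged usage sketch (the function below is hypothetical; the guard class is the one declared above):

void do_young_collection_pause() {
  CMSSynchronousYieldRequest yr;  // increment_pending_yields()
  // ... young-gen work; ConcurrentMarkSweepThread::should_yield() is now
  // true, so the CMS thread backs off at each of its yield checks ...
}                                 // decrement_pending_yields()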
-
-// Used to emit a warning in case of unexpectedly excessive
-// looping (in "apparently endless loops") in CMS code.
-class CMSLoopCountWarn: public StackObj {
- private:
-  const char* _src;
-  const char* _msg;
-  const intx  _threshold;
-  intx        _ticks;
-
- public:
-  inline CMSLoopCountWarn(const char* src, const char* msg,
-                          const intx threshold) :
-    _src(src), _msg(msg), _threshold(threshold), _ticks(0) { }
-
-  inline void tick() {
-    _ticks++;
-    if (CMSLoopWarn && _ticks % _threshold == 0) {
-      log_warning(gc)("%s has looped " INTX_FORMAT " times %s", _src, _ticks, _msg);
-    }
-  }
-};
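A hedged usage sketch of CMSLoopCountWarn: tick() once per iteration of a loop that is expected to terminate quickly, and a warning is logged every 'threshold' iterations while CMSLoopWarn is enabled (the loop condition below is hypothetical):

CMSLoopCountWarn loopWarn("precleaning", "spinning on a busy card", 1000);
while (!card_is_clean()) {  // hypothetical condition
  loopWarn.tick();          // logs every 1000 iterations when CMSLoopWarn is on
}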
-
-#endif // SHARE_GC_CMS_CONCURRENTMARKSWEEPTHREAD_HPP
--- a/src/hotspot/share/gc/cms/freeChunk.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/freeChunk.hpp"
-#include "utilities/copy.hpp"
-
-#ifndef PRODUCT
-
-#define baadbabeHeapWord badHeapWordVal
-#define deadbeefHeapWord 0xdeadbeef
-
-size_t const FreeChunk::header_size() {
-  return sizeof(FreeChunk)/HeapWordSize;
-}
-
-void FreeChunk::mangleAllocated(size_t size) {
-  // mangle all but the header of a just-allocated block
-  // of storage
-  assert(size >= MinChunkSize, "smallest size of object");
-  // we can't assert that _size == size because this may be an
-  // allocation out of a linear allocation block
-  assert(sizeof(FreeChunk) % HeapWordSize == 0,
-         "shouldn't write beyond chunk");
-  HeapWord* addr = (HeapWord*)this;
-  size_t hdr = header_size();
-  Copy::fill_to_words(addr + hdr, size - hdr, baadbabeHeapWord);
-}
-
-void FreeChunk::mangleFreed(size_t sz) {
-  assert(baadbabeHeapWord != deadbeefHeapWord, "Need distinct patterns");
-  // mangle all but the header of a just-freed block of storage
-  // just prior to passing it to the storage dictionary
-  assert(sz >= MinChunkSize, "smallest size of object");
-  assert(sz == size(), "just checking");
-  HeapWord* addr = (HeapWord*)this;
-  size_t hdr = header_size();
-  Copy::fill_to_words(addr + hdr, sz - hdr, deadbeefHeapWord);
-}
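Both manglers preserve the chunk header and overwrite the remainder with a recognizable pattern, so stale uses of freed or uninitialized storage show up as 0xdeadbeef-style values in crash dumps. A self-contained sketch of that header-preserving fill (invented sizes, standard library only):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const size_t hdr = 3, size = 8;   // header words and total chunk words
  uintptr_t block[8] = {1, 2, 3};   // the "header" words must stay intact
  std::fill(block + hdr, block + size, (uintptr_t)0xdeadbeef);  // mangle the body
  assert(block[0] == 1 && block[hdr] == 0xdeadbeef);
  return 0;
}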
-
-void FreeChunk::verifyList() const {
-  FreeChunk* nextFC = next();
-  if (nextFC != NULL) {
-    assert(this == nextFC->prev(), "broken chain");
-    assert(size() == nextFC->size(), "wrong size");
-    nextFC->verifyList();
-  }
-}
-#endif
-
-void FreeChunk::print_on(outputStream* st) {
-  st->print_cr("Next: " PTR_FORMAT " Prev: " PTR_FORMAT " %s",
-    p2i(next()), p2i(prev()), cantCoalesce() ? "[can't coalesce]" : "");
-}
--- a/src/hotspot/share/gc/cms/freeChunk.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,150 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_FREECHUNK_HPP
-#define SHARE_GC_CMS_FREECHUNK_HPP
-
-#include "memory/memRegion.hpp"
-#include "oops/markWord.hpp"
-#include "runtime/mutex.hpp"
-#include "runtime/orderAccess.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/ostream.hpp"
-
-//
-// Free block maintenance for Concurrent Mark Sweep Generation
-//
-// The main data structures for free blocks are
-// . an indexed array of small free blocks, and
-// . a dictionary of large free blocks
-//
-
-// No virtuals in FreeChunk (don't want any vtables).
-
-// A FreeChunk is merely a chunk that can be in a doubly linked list
-// and has a size field. NOTE: FreeChunks are distinguished from allocated
-// objects in two ways (by the sweeper), depending on whether the VM is 32 or
-// 64 bits.
-// On 32-bit VMs, or on 64-bit VMs without CompressedOops, the second word
-// (prev) has its LSB set to indicate a free chunk; allocated objects'
-// klass() pointers don't have their LSB set. The corresponding bit in the
-// CMSBitMap is set when the chunk is allocated. There are also blocks that
-// "look free" but are not part of the free list and should not be coalesced
-// into larger free blocks. These blocks have their two low-order bits set.
-
-class FreeChunk {
-  friend class VMStructs;
-  // For 64 bit compressed oops, the markWord encodes both the size and the
-  // indication that this is a FreeChunk and not an object.
-  volatile size_t   _size;
-  FreeChunk* _prev;
-  FreeChunk* _next;
-
-  markWord mark()     const volatile { return markWord((uintptr_t)_size); }
-  void set_mark(markWord m)          { _size = (size_t)m.value(); }
-
- public:
-  NOT_PRODUCT(static const size_t header_size();)
-
-  // Returns "true" if the address indicates that the block represents
-  // a free chunk.
-  static bool indicatesFreeChunk(const HeapWord* addr) {
-    // Force a volatile read from addr because the value might change between
-    // calls.  We really want the reads of _size and _prev through this pointer
-    // to be volatile, but making the fields volatile causes all sorts of
-    // compilation errors.
-    return ((volatile FreeChunk*)addr)->is_free();
-  }
-
-  bool is_free() const volatile {
-    LP64_ONLY(if (UseCompressedOops) return mark().is_cms_free_chunk(); else)
-    return (((intptr_t)_prev) & 0x1) == 0x1;
-  }
-  bool cantCoalesce() const {
-    assert(is_free(), "can't get coalesce bit on not free");
-    return (((intptr_t)_prev) & 0x2) == 0x2;
-  }
-  void dontCoalesce() {
-    // the block should be free
-    assert(is_free(), "Should look like a free block");
-    _prev = (FreeChunk*)(((intptr_t)_prev) | 0x2);
-  }
-  FreeChunk* prev() const {
-    return (FreeChunk*)(((intptr_t)_prev) & ~(0x3));
-  }
-
-  debug_only(void* prev_addr() const { return (void*)&_prev; })
-  debug_only(void* next_addr() const { return (void*)&_next; })
-  debug_only(void* size_addr() const { return (void*)&_size; })
-
-  size_t size() const volatile {
-    LP64_ONLY(if (UseCompressedOops) return mark().get_size(); else )
-    return _size;
-  }
-  void set_size(size_t sz) {
-    LP64_ONLY(if (UseCompressedOops) set_mark(markWord::set_size_and_free(sz)); else )
-    _size = sz;
-  }
-
-  FreeChunk* next()   const { return _next; }
-
-  void link_after(FreeChunk* ptr) {
-    link_next(ptr);
-    if (ptr != NULL) ptr->link_prev(this);
-  }
-  void link_next(FreeChunk* ptr) { _next = ptr; }
-  void link_prev(FreeChunk* ptr) {
-    LP64_ONLY(if (UseCompressedOops) _prev = ptr; else)
-    _prev = (FreeChunk*)((intptr_t)ptr | 0x1);
-  }
-  void clear_next()              { _next = NULL; }
-  void markNotFree() {
-    // Set _prev (klass) to null before (if) clearing the mark word below
-    _prev = NULL;
-#ifdef _LP64
-    if (UseCompressedOops) {
-      OrderAccess::storestore();
-      set_mark(markWord::prototype());
-    }
-#endif
-    assert(!is_free(), "Error");
-  }
-
-  // Return the address past the end of this chunk
-  uintptr_t* end() const { return ((uintptr_t*) this) + size(); }
-
-  // debugging
-  void verify()             const PRODUCT_RETURN;
-  void verifyList()         const PRODUCT_RETURN;
-  void mangleAllocated(size_t size) PRODUCT_RETURN;
-  void mangleFreed(size_t size)     PRODUCT_RETURN;
-
-  void print_on(outputStream* st);
-};
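The prev-pointer encoding above is low-bit pointer tagging: bit 0 says "free", bit 1 says "can't coalesce", and prev() masks both bits off to recover the real pointer. A standalone sketch of that scheme (illustrative only, not FreeChunk itself):

#include <cassert>
#include <cstdint>

struct Chunk;  // opaque; only the pointer's low bits matter here

static Chunk* tag_free(Chunk* p) { return (Chunk*)((uintptr_t)p | 0x1); }
static bool   is_free(Chunk* t)  { return ((uintptr_t)t & 0x1) != 0; }
static Chunk* untag(Chunk* t)    { return (Chunk*)((uintptr_t)t & ~(uintptr_t)0x3); }

int main() {
  alignas(8) static char storage[8];     // alignment keeps the low bits spare
  Chunk* p = (Chunk*)storage;
  Chunk* t = tag_free(p);
  assert(is_free(t) && untag(t) == p);   // the tag round-trips to the real pointer
  return 0;
}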
-
-extern size_t MinChunkSize;
-
-
-#endif // SHARE_GC_CMS_FREECHUNK_HPP
--- a/src/hotspot/share/gc/cms/gSpaceCounters.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/gSpaceCounters.hpp"
-#include "gc/shared/generation.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "utilities/macros.hpp"
-
-GSpaceCounters::GSpaceCounters(const char* name, int ordinal, size_t max_size,
-                               Generation* g, GenerationCounters* gc,
-                               bool sampled) :
-   _gen(g) {
-
-  if (UsePerfData) {
-    EXCEPTION_MARK;
-    ResourceMark rm;
-
-    const char* cns = PerfDataManager::name_space(gc->name_space(), "space",
-                                                  ordinal);
-
-    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1, mtGC);
-    strcpy(_name_space, cns);
-
-    const char* cname = PerfDataManager::counter_name(_name_space, "name");
-    PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
-
-    cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     (jlong)max_size, CHECK);
-
-    cname = PerfDataManager::counter_name(_name_space, "capacity");
-    _capacity = PerfDataManager::create_variable(SUN_GC, cname,
-                                                 PerfData::U_Bytes,
-                                                 _gen->capacity(), CHECK);
-
-    cname = PerfDataManager::counter_name(_name_space, "used");
-    if (sampled) {
-      _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-                                               new GenerationUsedHelper(_gen),
-                                               CHECK);
-    }
-    else {
-      _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
-                                               (jlong)0, CHECK);
-    }
-
-    cname = PerfDataManager::counter_name(_name_space, "initCapacity");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
-                                     _gen->capacity(), CHECK);
-  }
-}
-
-GSpaceCounters::~GSpaceCounters() {
-  FREE_C_HEAP_ARRAY(char, _name_space);
-}
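
The constructor above wires the "used" counter up in one of two ways: a sampled counter pulls the value through a PerfLongSampleHelper each time monitoring reads it, while an unsampled counter must be pushed to explicitly via update_used(). A rough stand-alone model of the two update styles (the PerfData machinery itself is HotSpot-internal, so the types here are stand-ins):

#include <cstdint>
#include <cstdio>
#include <functional>

// Stand-in for a PerfVariable: either holds a pushed value or pulls one
// through a sampler callback on every read.
class Counter {
  std::int64_t _value = 0;
  std::function<std::int64_t()> _sampler;  // empty => push-style counter

 public:
  explicit Counter(std::function<std::int64_t()> sampler = nullptr)
      : _sampler(sampler) {}

  void set_value(std::int64_t v) { _value = v; }  // the update_used() path
  std::int64_t get_value() const {
    return _sampler ? _sampler() : _value;        // sampled path pulls fresh
  }
};

int main() {
  std::int64_t used = 100;
  Counter sampled([&] { return used; });  // like GenerationUsedHelper
  Counter pushed;                         // like the 'sampled == false' branch

  used = 250;            // the sampled counter sees this automatically...
  pushed.set_value(250); // ...the pushed one only after an explicit update
  std::printf("sampled=%lld pushed=%lld\n",
              (long long)sampled.get_value(), (long long)pushed.get_value());
  return 0;
}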
--- a/src/hotspot/share/gc/cms/gSpaceCounters.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_GSPACECOUNTERS_HPP
-#define SHARE_GC_CMS_GSPACECOUNTERS_HPP
-
-#include "gc/shared/generation.hpp"
-#include "gc/shared/generationCounters.hpp"
-#include "runtime/perfData.hpp"
-#include "utilities/macros.hpp"
-
-// A GSpaceCounter is a holder class for performance counters
-// that track a space;
-
-class GSpaceCounters: public CHeapObj<mtGC> {
-  friend class VMStructs;
-
- private:
-  PerfVariable*      _capacity;
-  PerfVariable*      _used;
-
-  // Constant PerfData types don't need to retain a reference.
-  // However, it's a good idea to document them here.
-  // PerfConstant*     _size;
-
-  Generation*       _gen;
-  char*             _name_space;
-
- public:
-
-  GSpaceCounters(const char* name, int ordinal, size_t max_size, Generation* g,
-                 GenerationCounters* gc, bool sampled=true);
-
-  ~GSpaceCounters();
-
-  inline void update_capacity() {
-    _capacity->set_value(_gen->capacity());
-  }
-
-  inline void update_used() {
-    _used->set_value(_gen->used_stable());
-  }
-
-  // Special version of update_used() that allows the used value to be
-  // passed as a parameter. This method can be used in cases where the
-  // utilization is already known and/or when the _gen->used() method is
-  // known to be expensive and we want to avoid unnecessary calls to it.
-  //
-  inline void update_used(size_t used) {
-    _used->set_value(used);
-  }
-
-  inline void inc_used(size_t size) {
-    _used->inc(size);
-  }
-
-  debug_only(
-    // for security reasons, we do not allow arbitrary reads from
-    // the counters as they may live in shared memory.
-    jlong used() {
-      return _used->get_value();
-    }
-    jlong capacity() {
-      return _capacity->get_value();
-    }
-  )
-
-  inline void update_all() {
-    update_used();
-    update_capacity();
-  }
-
-  const char* name_space() const        { return _name_space; }
-};
-
-class GenerationUsedHelper : public PerfLongSampleHelper {
-  private:
-    Generation* _gen;
-
-  public:
-    GenerationUsedHelper(Generation* g) : _gen(g) { }
-
-    inline jlong take_sample() {
-      return _gen->used_stable();
-    }
-};
-
-#endif // SHARE_GC_CMS_GSPACECOUNTERS_HPP
--- a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,238 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/jvmFlagConstraintsCMS.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/jvmFlagConstraintsGC.hpp"
-#include "memory/universe.hpp"
-#include "runtime/globals_extension.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-static JVMFlag::Error ParallelGCThreadsAndCMSWorkQueueDrainThreshold(uint threads, uintx threshold, bool verbose) {
-  // CMSWorkQueueDrainThreshold is verified to be less than max_juint
-  if (UseConcMarkSweepGC && (threads > (uint)(max_jint / (uint)threshold))) {
-    JVMFlag::printError(verbose,
-                        "ParallelGCThreads (" UINT32_FORMAT ") or CMSWorkQueueDrainThreshold ("
-                        UINTX_FORMAT ") is too large\n",
-                        threads, threshold);
-    return JVMFlag::VIOLATES_CONSTRAINT;
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose) {
-  // To avoid overflow at ParScanClosure::do_oop_work.
-  if (UseConcMarkSweepGC && (value > (max_jint / 10))) {
-    JVMFlag::printError(verbose,
-                        "ParallelGCThreads (" UINT32_FORMAT ") must be "
-                        "less than or equal to " UINT32_FORMAT " for CMS GC\n",
-                        value, (max_jint / 10));
-    return JVMFlag::VIOLATES_CONSTRAINT;
-  }
-  return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(value, CMSWorkQueueDrainThreshold, verbose);
-}
-JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC && (value > ((uintx)max_jint / (uintx)ParallelGCThreads))) {
-    JVMFlag::printError(verbose,
-                        "ParGCStridesPerThread (" UINTX_FORMAT ") must be "
-                        "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
-                        value, ((uintx)max_jint / (uintx)ParallelGCThreads));
-    return JVMFlag::VIOLATES_CONSTRAINT;
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    // ParGCCardsPerStrideChunk should be compared with card table size.
-    size_t heap_size = CMSHeap::heap()->reserved_region().word_size();
-    CardTableRS* ct = GenCollectedHeap::heap()->rem_set();
-    size_t card_table_size = ct->cards_required(heap_size) - 1; // Valid card table size
-
-    if ((size_t)value > card_table_size) {
-      JVMFlag::printError(verbose,
-                          "ParGCCardsPerStrideChunk (" INTX_FORMAT ") is too large for the heap size and "
-                          "must be less than or equal to card table size (" SIZE_FORMAT ")\n",
-                          value, card_table_size);
-      return JVMFlag::VIOLATES_CONSTRAINT;
-    }
-
-    // ParGCCardsPerStrideChunk is used with n_strides (ParallelGCThreads * ParGCStridesPerThread)
-    // from CardTableRS::process_stride(). Note that ParGCStridesPerThread is already checked
-    // not to make an overflow with ParallelGCThreads from its constraint function.
-    uintx n_strides = ParallelGCThreads * ParGCStridesPerThread;
-    uintx ergo_max = max_uintx / n_strides;
-    if ((uintx)value > ergo_max) {
-      JVMFlag::printError(verbose,
-                          "ParGCCardsPerStrideChunk (" INTX_FORMAT ") must be "
-                          "less than or equal to ergonomic maximum (" UINTX_FORMAT ")\n",
-                          value, ergo_max);
-      return JVMFlag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose) {
-  JVMFlag::Error status = JVMFlag::SUCCESS;
-
-  if (UseConcMarkSweepGC) {
-    if (value > CMSOldPLABMax) {
-      JVMFlag::printError(verbose,
-                          "CMSOldPLABMin (" SIZE_FORMAT ") must be "
-                          "less than or equal to CMSOldPLABMax (" SIZE_FORMAT ")\n",
-                          value, CMSOldPLABMax);
-      return JVMFlag::VIOLATES_CONSTRAINT;
-    }
-    status = MaxPLABSizeBounds("CMSOldPLABMin", value, verbose);
-  }
-  return status;
-}
-
-JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose) {
-  JVMFlag::Error status = JVMFlag::SUCCESS;
-
-  if (UseConcMarkSweepGC) {
-    status = MaxPLABSizeBounds("CMSOldPLABMax", value, verbose);
-  }
-  return status;
-}
-
-static JVMFlag::Error CMSReservedAreaConstraintFunc(const char* name, size_t value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    ConcurrentMarkSweepGeneration* cms = CMSHeap::heap()->old_gen();
-    const size_t ergo_max = cms->cmsSpace()->max_flag_size_for_task_size();
-    if (value > ergo_max) {
-      JVMFlag::printError(verbose,
-                          "%s (" SIZE_FORMAT ") must be "
-                          "less than or equal to ergonomic maximum (" SIZE_FORMAT ") "
-                          "which is based on the maximum size of the old generation of the Java heap\n",
-                          name, value, ergo_max);
-      return JVMFlag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose) {
-  JVMFlag::Error status = CMSReservedAreaConstraintFunc("CMSRescanMultiple", value, verbose);
-
-  if (status == JVMFlag::SUCCESS && UseConcMarkSweepGC) {
-    // CMSParRemarkTask::do_dirty_card_rescan_tasks requires CompactibleFreeListSpace::rescan_task_size()
-    // to be aligned to CardTable::card_size * BitsPerWord.
-    // Note that rescan_task_size() will be aligned if CMSRescanMultiple is a multiple of 'HeapWordSize'
-    // because rescan_task_size() is CardTable::card_size / HeapWordSize * BitsPerWord.
-    if (value % HeapWordSize != 0) {
-      JVMFlag::printError(verbose,
-                          "CMSRescanMultiple (" SIZE_FORMAT ") must be "
-                          "a multiple of %d\n",
-                          value, HeapWordSize);
-      status = JVMFlag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return status;
-}
-
-JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose) {
-  return CMSReservedAreaConstraintFunc("CMSConcMarkMultiple", value, verbose);
-}
-
-JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC && (value <= CMSPrecleanNumerator)) {
-    JVMFlag::printError(verbose,
-                        "CMSPrecleanDenominator (" UINTX_FORMAT ") must be "
-                        "strickly greater than CMSPrecleanNumerator (" UINTX_FORMAT ")\n",
-                        value, CMSPrecleanNumerator);
-    return JVMFlag::VIOLATES_CONSTRAINT;
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC && (value >= CMSPrecleanDenominator)) {
-    JVMFlag::printError(verbose,
-                        "CMSPrecleanNumerator (" UINTX_FORMAT ") must be "
-                        "less than CMSPrecleanDenominator (" UINTX_FORMAT ")\n",
-                        value, CMSPrecleanDenominator);
-    return JVMFlag::VIOLATES_CONSTRAINT;
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    size_t max_capacity = CMSHeap::heap()->young_gen()->max_capacity();
-    if (value > max_uintx - max_capacity) {
-      JVMFlag::printError(verbose,
-                          "CMSSamplingGrain (" UINTX_FORMAT ") must be "
-                          "less than or equal to ergonomic maximum (" SIZE_FORMAT ")\n",
-                          value, max_uintx - max_capacity);
-      return JVMFlag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose) {
-  if (UseConcMarkSweepGC) {
-    return ParallelGCThreadsAndCMSWorkQueueDrainThreshold(ParallelGCThreads, value, verbose);
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose) {
-  // Skip for current default value.
-  if (UseConcMarkSweepGC && FLAG_IS_CMDLINE(CMSBitMapYieldQuantum)) {
-    // CMSBitMapYieldQuantum should be compared with mark bitmap size.
-    ConcurrentMarkSweepGeneration* cms = CMSHeap::heap()->old_gen();
-    size_t bitmap_size = cms->collector()->markBitMap()->sizeInWords();
-
-    if (value > bitmap_size) {
-      JVMFlag::printError(verbose,
-                          "CMSBitMapYieldQuantum (" SIZE_FORMAT ") must "
-                          "be less than or equal to bitmap size (" SIZE_FORMAT ") "
-                          "whose size corresponds to the size of old generation of the Java heap\n",
-                          value, bitmap_size);
-      return JVMFlag::VIOLATES_CONSTRAINT;
-    }
-  }
-  return JVMFlag::SUCCESS;
-}
-
-JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose) {
-  if (value == 0) {
-    JVMFlag::printError(verbose,
-                        "OldPLABSize (" SIZE_FORMAT ") must be greater than 0",
-                        value);
-    return JVMFlag::VIOLATES_CONSTRAINT;
-  }
-  // For CMS, OldPLABSize is the number of free blocks of a given size that are used when
-  // replenishing the local per-worker free list caches.
-  // For more details, please refer to Arguments::set_cms_and_parnew_gc_flags().
-  return MaxPLABSizeBounds("OldPLABSize", value, verbose);
-}
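
A recurring pattern in the constraint functions above is checking a product such as ParallelGCThreads * CMSWorkQueueDrainThreshold against a limit without letting the check itself overflow: one factor is compared against limit divided by the other. A tiny stand-alone illustration (names are illustrative, not HotSpot API):

#include <cstdint>
#include <cstdio>

// Returns true if threads * threshold would exceed limit, evaluated
// without performing the multiplication (and hence without overflow).
static bool product_exceeds(std::uint64_t threads,
                            std::uint64_t threshold,
                            std::uint64_t limit) {
  return threshold != 0 && threads > limit / threshold;
}

int main() {
  const std::uint64_t max_jint = 2147483647;  // same limit the checks use
  std::printf("%d\n", product_exceeds(100000, 1000000, max_jint));  // 1
  std::printf("%d\n", product_exceeds(4, 1024, max_jint));          // 0
  return 0;
}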
--- a/src/hotspot/share/gc/cms/jvmFlagConstraintsCMS.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_JVMFLAGCONSTRAINTSCMS_HPP
-#define SHARE_GC_CMS_JVMFLAGCONSTRAINTSCMS_HPP
-
-#include "runtime/flags/jvmFlag.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-// CMS Flag Constraints
-JVMFlag::Error ParGCStridesPerThreadConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error ParGCCardsPerStrideChunkConstraintFunc(intx value, bool verbose);
-JVMFlag::Error CMSOldPLABMinConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSOldPLABMaxConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSRescanMultipleConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSConcMarkMultipleConstraintFunc(size_t value, bool verbose);
-JVMFlag::Error CMSPrecleanDenominatorConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSPrecleanNumeratorConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSSamplingGrainConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSWorkQueueDrainThresholdConstraintFunc(uintx value, bool verbose);
-JVMFlag::Error CMSBitMapYieldQuantumConstraintFunc(size_t value, bool verbose);
-
-// CMS Subconstraints
-JVMFlag::Error ParallelGCThreadsConstraintFuncCMS(uint value, bool verbose);
-JVMFlag::Error OldPLABSizeConstraintFuncCMS(size_t value, bool verbose);
-
-#endif // SHARE_GC_CMS_JVMFLAGCONSTRAINTSCMS_HPP
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1446 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/stringTable.hpp"
-#include "gc/cms/cmsHeap.inline.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.inline.hpp"
-#include "gc/cms/parOopClosures.inline.hpp"
-#include "gc/serial/defNewGeneration.inline.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/ageTable.inline.hpp"
-#include "gc/shared/copyFailedInfo.hpp"
-#include "gc/shared/gcHeapSummary.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "gc/shared/generation.hpp"
-#include "gc/shared/plab.inline.hpp"
-#include "gc/shared/preservedMarks.inline.hpp"
-#include "gc/shared/referencePolicy.hpp"
-#include "gc/shared/referenceProcessorPhaseTimes.hpp"
-#include "gc/shared/space.hpp"
-#include "gc/shared/spaceDecorator.inline.hpp"
-#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/taskqueue.inline.hpp"
-#include "gc/shared/weakProcessor.hpp"
-#include "gc/shared/workgroup.hpp"
-#include "gc/shared/workerPolicy.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/objArrayOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/atomic.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/thread.inline.hpp"
-#include "utilities/copy.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/stack.inline.hpp"
-
-ParScanThreadState::ParScanThreadState(Space* to_space_,
-                                       ParNewGeneration* young_gen_,
-                                       Generation* old_gen_,
-                                       int thread_num_,
-                                       ObjToScanQueueSet* work_queue_set_,
-                                       Stack<oop, mtGC>* overflow_stacks_,
-                                       PreservedMarks* preserved_marks_,
-                                       size_t desired_plab_sz_,
-                                       TaskTerminator& term_) :
-  _work_queue(work_queue_set_->queue(thread_num_)),
-  _overflow_stack(overflow_stacks_ ? overflow_stacks_ + thread_num_ : NULL),
-  _preserved_marks(preserved_marks_),
-  _to_space_alloc_buffer(desired_plab_sz_),
-  _to_space_closure(young_gen_, this),
-  _old_gen_closure(young_gen_, this),
-  _to_space_root_closure(young_gen_, this),
-  _older_gen_closure(young_gen_, this),
-  _old_gen_root_closure(young_gen_, this),
-  _evacuate_followers(this, &_to_space_closure, &_old_gen_closure,
-                      &_to_space_root_closure, young_gen_, &_old_gen_root_closure,
-                      work_queue_set_, term_.terminator()),
-  _is_alive_closure(young_gen_),
-  _scan_weak_ref_closure(young_gen_, this),
-  _keep_alive_closure(&_scan_weak_ref_closure),
-  _to_space(to_space_),
-  _young_gen(young_gen_),
-  _old_gen(old_gen_),
-  _young_old_boundary(NULL),
-  _thread_num(thread_num_),
-  _ageTable(false), // false ==> not the global age table, no perf data.
-  _to_space_full(false),
-  _strong_roots_time(0.0),
-  _term_time(0.0)
-{
-  #if TASKQUEUE_STATS
-  _term_attempts = 0;
-  _overflow_refills = 0;
-  _overflow_refill_objs = 0;
-  #endif // TASKQUEUE_STATS
-
-  _survivor_chunk_array = (ChunkArray*) old_gen()->get_data_recorder(thread_num());
-  _start = os::elapsedTime();
-  _old_gen_closure.set_generation(old_gen_);
-  _old_gen_root_closure.set_generation(old_gen_);
-}
-
-void ParScanThreadState::record_survivor_plab(HeapWord* plab_start,
-                                              size_t plab_word_size) {
-  ChunkArray* sca = survivor_chunk_array();
-  if (sca != NULL) {
-    // A non-null SCA implies that we want the PLAB data recorded.
-    sca->record_sample(plab_start, plab_word_size);
-  }
-}
-
-bool ParScanThreadState::should_be_partially_scanned(oop new_obj, oop old_obj) const {
-  return new_obj->is_objArray() &&
-         arrayOop(new_obj)->length() > ParGCArrayScanChunk &&
-         new_obj != old_obj;
-}
-
-void ParScanThreadState::scan_partial_array_and_push_remainder(oop old) {
-  assert(old->is_objArray(), "must be obj array");
-  assert(old->is_forwarded(), "must be forwarded");
-  assert(CMSHeap::heap()->is_in_reserved(old), "must be in heap.");
-  assert(!old_gen()->is_in(old), "must be in young generation.");
-
-  objArrayOop obj = objArrayOop(old->forwardee());
-  // Process ParGCArrayScanChunk elements now
-  // and push the remainder back onto queue
-  int start     = arrayOop(old)->length();
-  int end       = obj->length();
-  int remainder = end - start;
-  assert(start <= end, "just checking");
-  if (remainder > 2 * ParGCArrayScanChunk) {
-    // Test above combines last partial chunk with a full chunk
-    end = start + ParGCArrayScanChunk;
-    arrayOop(old)->set_length(end);
-    // Push remainder.
-    bool ok = work_queue()->push(old);
-    assert(ok, "just popped, push must be okay");
-  } else {
-    // Restore length so that it can be used if there
-    // is a promotion failure and forwarding pointers
-    // must be removed.
-    arrayOop(old)->set_length(end);
-  }
-
-  // process our set of indices (include header in first chunk)
-  // should make sure end is even (aligned to HeapWord in case of compressed oops)
-  if ((HeapWord *)obj < young_old_boundary()) {
-    // object is in to_space
-    obj->oop_iterate_range(&_to_space_closure, start, end);
-  } else {
-    // object is in old generation
-    obj->oop_iterate_range(&_old_gen_closure, start, end);
-  }
-}
-
-void ParScanThreadState::trim_queues(int max_size) {
-  ObjToScanQueue* queue = work_queue();
-  do {
-    while (queue->size() > (juint)max_size) {
-      oop obj_to_scan;
-      if (queue->pop_local(obj_to_scan)) {
-        if ((HeapWord *)obj_to_scan < young_old_boundary()) {
-          if (obj_to_scan->is_objArray() &&
-              obj_to_scan->is_forwarded() &&
-              obj_to_scan->forwardee() != obj_to_scan) {
-            scan_partial_array_and_push_remainder(obj_to_scan);
-          } else {
-            // object is in to_space
-            obj_to_scan->oop_iterate(&_to_space_closure);
-          }
-        } else {
-          // object is in old generation
-          obj_to_scan->oop_iterate(&_old_gen_closure);
-        }
-      }
-    }
-    // For the case of compressed oops, we have a private, non-shared
-    // overflow stack, so we eagerly drain it so as to more evenly
-    // distribute load early. Note: this may be good to do in
-    // general rather than delay for the final stealing phase.
-    // If applicable, we'll transfer a set of objects over to our
-    // work queue, allowing them to be stolen and draining our
-    // private overflow stack.
-  } while (ParGCTrimOverflow && young_gen()->take_from_overflow_list(this));
-}
-
-bool ParScanThreadState::take_from_overflow_stack() {
-  assert(ParGCUseLocalOverflow, "Else should not call");
-  assert(young_gen()->overflow_list() == NULL, "Error");
-  ObjToScanQueue* queue = work_queue();
-  Stack<oop, mtGC>* const of_stack = overflow_stack();
-  const size_t num_overflow_elems = of_stack->size();
-  const size_t space_available = queue->max_elems() - queue->size();
-  const size_t num_take_elems = MIN3(space_available / 4,
-                                     (size_t)ParGCDesiredObjsFromOverflowList,
-                                     num_overflow_elems);
-  // Transfer the most recent num_take_elems from the overflow
-  // stack to our work queue.
-  for (size_t i = 0; i != num_take_elems; i++) {
-    oop cur = of_stack->pop();
-    oop obj_to_push = cur->forwardee();
-    assert(CMSHeap::heap()->is_in_reserved(cur), "Should be in heap");
-    assert(!old_gen()->is_in_reserved(cur), "Should be in young gen");
-    assert(CMSHeap::heap()->is_in_reserved(obj_to_push), "Should be in heap");
-    if (should_be_partially_scanned(obj_to_push, cur)) {
-      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
-      obj_to_push = cur;
-    }
-    bool ok = queue->push(obj_to_push);
-    assert(ok, "Should have succeeded");
-  }
-  assert(young_gen()->overflow_list() == NULL, "Error");
-  return num_take_elems > 0;  // was something transferred?
-}
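// The MIN3 sizing above, as a stand-alone sketch: transfer at most a
// quarter of the queue's free space, further capped by the tuning knob
// (ParGCDesiredObjsFromOverflowList) and by what the overflow stack
// actually holds. Illustrative names, not HotSpot API.
#include <algorithm>
#include <cstddef>

static std::size_t num_to_take(std::size_t queue_capacity,
                               std::size_t queue_size,
                               std::size_t desired_from_overflow,
                               std::size_t overflow_elems) {
  const std::size_t space_available = queue_capacity - queue_size;
  // e.g. num_to_take(1024, 1000, 20, 50) == 6: a quarter of the 24 free
  // slots, leaving headroom for the worker's own pushes.
  return std::min({space_available / 4, desired_from_overflow, overflow_elems});
}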
-
-void ParScanThreadState::push_on_overflow_stack(oop p) {
-  assert(ParGCUseLocalOverflow, "Else should not call");
-  overflow_stack()->push(p);
-  assert(young_gen()->overflow_list() == NULL, "Error");
-}
-
-HeapWord* ParScanThreadState::alloc_in_to_space_slow(size_t word_sz) {
-  // If the object is small enough, try to reallocate the buffer.
-  HeapWord* obj = NULL;
-  if (!_to_space_full) {
-    PLAB* const plab = to_space_alloc_buffer();
-    Space* const sp  = to_space();
-    if (word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()) {
-      // Is small enough; abandon this buffer and start a new one.
-      plab->retire();
-      // The minimum size has to be twice SurvivorAlignmentInBytes to
-      // allow for padding used in the alignment of 1 word.  A padding
-      // of 1 is too small for a filler word so the padding size will
-      // be increased by SurvivorAlignmentInBytes.
-      size_t min_usable_size = 2 * static_cast<size_t>(SurvivorAlignmentInBytes >> LogHeapWordSize);
-      size_t buf_size = MAX2(plab->word_sz(), min_usable_size);
-      HeapWord* buf_space = sp->par_allocate(buf_size);
-      if (buf_space == NULL) {
-        const size_t min_bytes = MAX2(PLAB::min_size(), min_usable_size) << LogHeapWordSize;
-        size_t free_bytes = sp->free();
-        while(buf_space == NULL && free_bytes >= min_bytes) {
-          buf_size = free_bytes >> LogHeapWordSize;
-          assert(buf_size == (size_t)align_object_size(buf_size), "Invariant");
-          buf_space  = sp->par_allocate(buf_size);
-          free_bytes = sp->free();
-        }
-      }
-      if (buf_space != NULL) {
-        plab->set_buf(buf_space, buf_size);
-        record_survivor_plab(buf_space, buf_size);
-        obj = plab->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
-        // Note that we cannot compare buf_size < word_sz below
-        // because of AlignmentReserve (see PLAB::allocate()).
-        assert(obj != NULL || plab->words_remaining() < word_sz,
-               "Else should have been able to allocate requested object size "
-               SIZE_FORMAT ", PLAB size " SIZE_FORMAT ", SurvivorAlignmentInBytes "
-               SIZE_FORMAT ", words_remaining " SIZE_FORMAT,
-               word_sz, buf_size, SurvivorAlignmentInBytes, plab->words_remaining());
-        // It's conceivable that we may be able to use the
-        // buffer we just grabbed for subsequent small requests
-        // even if not for this one.
-      } else {
-        // We're used up.
-        _to_space_full = true;
-      }
-    } else {
-      // Too large; allocate the object individually.
-      obj = sp->par_allocate(word_sz);
-    }
-  }
-  return obj;
-}
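// The waste-percentage decision in the slow path above, as a stand-alone
// sketch: a request that is small relative to the PLAB justifies retiring
// the buffer and starting a fresh one, while a large request is allocated
// directly in to-space so the PLAB's remaining words are not thrown away.
// Illustrative names, not HotSpot API.
#include <cstddef>

static bool should_refill_plab(std::size_t word_sz,
                               std::size_t plab_word_sz,
                               std::size_t waste_pct) {
  // Mirrors: word_sz * 100 < ParallelGCBufferWastePct * plab->word_sz()
  return word_sz * 100 < waste_pct * plab_word_sz;
}
// With a 10% budget and a 4096-word PLAB: requests under ~410 words refill
// the buffer; bigger ones take the direct sp->par_allocate(word_sz) path.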
-
-void ParScanThreadState::undo_alloc_in_to_space(HeapWord* obj, size_t word_sz) {
-  to_space_alloc_buffer()->undo_allocation(obj, word_sz);
-}
-
-void ParScanThreadState::print_promotion_failure_size() {
-  if (_promotion_failed_info.has_failed()) {
-    log_trace(gc, promotion)(" (%d: promotion failure size = " SIZE_FORMAT ") ",
-                             _thread_num, _promotion_failed_info.first_size());
-  }
-}
-
-class ParScanThreadStateSet: StackObj {
-public:
-  // Initializes states for the specified number of threads.
-  ParScanThreadStateSet(int                     num_threads,
-                        Space&                  to_space,
-                        ParNewGeneration&       young_gen,
-                        Generation&             old_gen,
-                        ObjToScanQueueSet&      queue_set,
-                        Stack<oop, mtGC>*       overflow_stacks_,
-                        PreservedMarksSet&      preserved_marks_set,
-                        size_t                  desired_plab_sz,
-                        TaskTerminator& term);
-
-  ~ParScanThreadStateSet() { TASKQUEUE_STATS_ONLY(reset_stats()); }
-
-  inline ParScanThreadState& thread_state(int i);
-
-  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
-  void reset(uint active_workers, bool promotion_failed);
-  void flush();
-
-  #if TASKQUEUE_STATS
-  static void print_termination_stats_hdr(outputStream* const st);
-  void print_termination_stats();
-  static void print_taskqueue_stats_hdr(outputStream* const st);
-  void print_taskqueue_stats();
-  void reset_stats();
-  #endif // TASKQUEUE_STATS
-
-private:
-  TaskTerminator&         _term;
-  ParNewGeneration&       _young_gen;
-  Generation&             _old_gen;
-  ParScanThreadState*     _per_thread_states;
-  const int               _num_threads;
- public:
-  bool is_valid(int id) const { return id < _num_threads; }
-  ParallelTaskTerminator* terminator() { return _term.terminator(); }
-};
-
-ParScanThreadStateSet::ParScanThreadStateSet(int num_threads,
-                                             Space& to_space,
-                                             ParNewGeneration& young_gen,
-                                             Generation& old_gen,
-                                             ObjToScanQueueSet& queue_set,
-                                             Stack<oop, mtGC>* overflow_stacks,
-                                             PreservedMarksSet& preserved_marks_set,
-                                             size_t desired_plab_sz,
-                                             TaskTerminator& term)
-  : _term(term),
-    _young_gen(young_gen),
-    _old_gen(old_gen),
-    _per_thread_states(NEW_RESOURCE_ARRAY(ParScanThreadState, num_threads)),
-    _num_threads(num_threads)
-{
-  assert(num_threads > 0, "sanity check!");
-  assert(ParGCUseLocalOverflow == (overflow_stacks != NULL),
-         "overflow_stack allocation mismatch");
-  // Initialize states.
-  for (int i = 0; i < num_threads; ++i) {
-    new(_per_thread_states + i)
-      ParScanThreadState(&to_space, &young_gen, &old_gen, i, &queue_set,
-                         overflow_stacks, preserved_marks_set.get(i),
-                         desired_plab_sz, term);
-  }
-}
-
-inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i) {
-  assert(i >= 0 && i < _num_threads, "sanity check!");
-  return _per_thread_states[i];
-}
-
-void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
-  for (int i = 0; i < _num_threads; ++i) {
-    if (thread_state(i).promotion_failed()) {
-      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
-      thread_state(i).promotion_failed_info().reset();
-    }
-  }
-}
-
-void ParScanThreadStateSet::reset(uint active_threads, bool promotion_failed) {
-  _term.terminator()->reset_for_reuse(active_threads);
-  if (promotion_failed) {
-    for (int i = 0; i < _num_threads; ++i) {
-      thread_state(i).print_promotion_failure_size();
-    }
-  }
-}
-
-#if TASKQUEUE_STATS
-void ParScanThreadState::reset_stats() {
-  taskqueue_stats().reset();
-  _term_attempts = 0;
-  _overflow_refills = 0;
-  _overflow_refill_objs = 0;
-}
-
-void ParScanThreadStateSet::reset_stats() {
-  for (int i = 0; i < _num_threads; ++i) {
-    thread_state(i).reset_stats();
-  }
-}
-
-void ParScanThreadStateSet::print_termination_stats_hdr(outputStream* const st) {
-  st->print_raw_cr("GC Termination Stats");
-  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------");
-  st->print_raw_cr("thr     ms        ms       %       ms       %   attempts");
-  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------");
-}
-
-void ParScanThreadStateSet::print_termination_stats() {
-  Log(gc, task, stats) log;
-  if (!log.is_debug()) {
-    return;
-  }
-
-  ResourceMark rm;
-  LogStream ls(log.debug());
-  outputStream* st = &ls;
-
-  print_termination_stats_hdr(st);
-
-  for (int i = 0; i < _num_threads; ++i) {
-    const ParScanThreadState & pss = thread_state(i);
-    const double elapsed_ms = pss.elapsed_time() * 1000.0;
-    const double s_roots_ms = pss.strong_roots_time() * 1000.0;
-    const double term_ms = pss.term_time() * 1000.0;
-    st->print_cr("%3d %9.2f %9.2f %6.2f %9.2f %6.2f " SIZE_FORMAT_W(8),
-                 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
-                 term_ms, term_ms * 100 / elapsed_ms, pss.term_attempts());
-  }
-}
-
-// Print stats related to work queue activity.
-void ParScanThreadStateSet::print_taskqueue_stats_hdr(outputStream* const st) {
-  st->print_raw_cr("GC Task Stats");
-  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
-  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
-}
-
-void ParScanThreadStateSet::print_taskqueue_stats() {
-  if (!log_is_enabled(Trace, gc, task, stats)) {
-    return;
-  }
-  Log(gc, task, stats) log;
-  ResourceMark rm;
-  LogStream ls(log.trace());
-  outputStream* st = &ls;
-  print_taskqueue_stats_hdr(st);
-
-  TaskQueueStats totals;
-  for (int i = 0; i < _num_threads; ++i) {
-    const ParScanThreadState & pss = thread_state(i);
-    const TaskQueueStats & stats = pss.taskqueue_stats();
-    st->print("%3d ", i); stats.print(st); st->cr();
-    totals += stats;
-
-    if (pss.overflow_refills() > 0) {
-      st->print_cr("    " SIZE_FORMAT_W(10) " overflow refills    "
-                   SIZE_FORMAT_W(10) " overflow objects",
-                   pss.overflow_refills(), pss.overflow_refill_objs());
-    }
-  }
-  st->print("tot "); totals.print(st); st->cr();
-
-  DEBUG_ONLY(totals.verify());
-}
-#endif // TASKQUEUE_STATS
-
-void ParScanThreadStateSet::flush() {
-  // Work in this loop should be kept as lightweight as
-  // possible since this might otherwise become a bottleneck
-  // to scaling. If heavyweight work is ever added to this
-  // loop, consider parallelizing it across the worker threads.
-  for (int i = 0; i < _num_threads; ++i) {
-    ParScanThreadState& par_scan_state = thread_state(i);
-
-    // Flush stats related to To-space PLAB activity and
-    // retire the last buffer.
-    par_scan_state.to_space_alloc_buffer()->flush_and_retire_stats(_young_gen.plab_stats());
-
-    // Every thread has its own age table.  We need to merge
-    // them all into one.
-    AgeTable *local_table = par_scan_state.age_table();
-    _young_gen.age_table()->merge(local_table);
-
-    // Inform old gen that we're done.
-    _old_gen.par_promote_alloc_done(i);
-  }
-
-  if (UseConcMarkSweepGC) {
-    // We need to call this even when ResizeOldPLAB is disabled
-    // so as to avoid breaking some asserts. While we may be able
-    // to avoid this by reorganizing the code a bit, I am loath
-    // to do that unless we find cases where ergo leads to bad
-    // performance.
-    CompactibleFreeListSpaceLAB::compute_desired_plab_size();
-  }
-}
-
-ParScanClosure::ParScanClosure(ParNewGeneration* g,
-                               ParScanThreadState* par_scan_state) :
-  OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
-  _boundary = _g->reserved().end();
-}
-
-void ParRootScanWithBarrierTwoGensClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, true); }
-void ParRootScanWithBarrierTwoGensClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, true); }
-
-void ParRootScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, true); }
-void ParRootScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, true); }
-
-ParScanWeakRefClosure::ParScanWeakRefClosure(ParNewGeneration* g,
-                                             ParScanThreadState* par_scan_state)
-  : ScanWeakRefClosure(g), _par_scan_state(par_scan_state)
-{}
-
-#ifdef WIN32
-#pragma warning(disable: 4786) /* identifier was truncated to '255' characters in the browser information */
-#endif
-
-ParEvacuateFollowersClosure::ParEvacuateFollowersClosure(
-    ParScanThreadState* par_scan_state_,
-    ParScanWithoutBarrierClosure* to_space_closure_,
-    ParScanWithBarrierClosure* old_gen_closure_,
-    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
-    ParNewGeneration* par_gen_,
-    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
-    ObjToScanQueueSet* task_queues_,
-    ParallelTaskTerminator* terminator_) :
-
-    _par_scan_state(par_scan_state_),
-    _to_space_closure(to_space_closure_),
-    _to_space_root_closure(to_space_root_closure_),
-    _old_gen_closure(old_gen_closure_),
-    _old_gen_root_closure(old_gen_root_closure_),
-    _par_gen(par_gen_),
-    _task_queues(task_queues_),
-    _terminator(terminator_)
-{}
-
-void ParEvacuateFollowersClosure::do_void() {
-  ObjToScanQueue* work_q = par_scan_state()->work_queue();
-
-  while (true) {
-    // Scan to-space and old-gen objs until we run out of both.
-    oop obj_to_scan;
-    par_scan_state()->trim_queues(0);
-
-    // We have no local work, attempt to steal from other threads.
-
-    // Attempt to steal work promoted by other threads.
-    if (task_queues()->steal(par_scan_state()->thread_num(),
-                             obj_to_scan)) {
-      bool res = work_q->push(obj_to_scan);
-      assert(res, "Empty queue should have room for a push.");
-
-      // If successful, goto Start.
-      continue;
-
-      // Try global overflow list.
-    } else if (par_gen()->take_from_overflow_list(par_scan_state())) {
-      continue;
-    }
-
-    // Otherwise, offer termination.
-    par_scan_state()->start_term_time();
-    if (terminator()->offer_termination()) break;
-    par_scan_state()->end_term_time();
-  }
-  assert(par_gen()->_overflow_list == NULL && par_gen()->_num_par_pushes == 0,
-         "Broken overflow list?");
-  // Finish the last termination pause.
-  par_scan_state()->end_term_time();
-}
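// Control flow of do_void() above, reduced to a stand-alone skeleton of
// the drain/steal/terminate pattern: exhaust the local queue, try to
// steal (or refill from the overflow list), and only then offer
// termination. The queue, steal() and offer_termination() here are
// stand-ins for the HotSpot task-queue machinery, not its API.
#include <deque>

struct WorkerSketch {
  std::deque<int> local;                      // stands in for ObjToScanQueue

  bool steal(int& /*out*/) { return false; }  // would scan other queues
  bool offer_termination() { return true; }   // would wait for all idle

  void evacuate_followers() {
    while (true) {
      while (!local.empty()) {   // trim_queues(0): drain all local work
        local.pop_front();       // the real code scans the object here
      }
      int stolen;
      if (steal(stolen)) {       // refill from another thread and restart
        local.push_back(stolen);
        continue;
      }
      if (offer_termination()) break;  // every worker idle: phase is done
    }
  }
};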
-
-ParNewGenTask::ParNewGenTask(ParNewGeneration* young_gen,
-                             Generation* old_gen,
-                             HeapWord* young_old_boundary,
-                             ParScanThreadStateSet* state_set,
-                             StrongRootsScope* strong_roots_scope) :
-    AbstractGangTask("ParNewGeneration collection"),
-    _young_gen(young_gen), _old_gen(old_gen),
-    _young_old_boundary(young_old_boundary),
-    _state_set(state_set),
-    _strong_roots_scope(strong_roots_scope)
-{}
-
-void ParNewGenTask::work(uint worker_id) {
-  CMSHeap* heap = CMSHeap::heap();
-  // Since this is being done in a separate thread, need new resource
-  // and handle marks.
-  ResourceMark rm;
-  HandleMark hm;
-
-  ParScanThreadState& par_scan_state = _state_set->thread_state(worker_id);
-  assert(_state_set->is_valid(worker_id), "Should not have been called");
-
-  par_scan_state.set_young_old_boundary(_young_old_boundary);
-
-  CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
-                                  heap->rem_set()->cld_rem_set()->accumulate_modified_oops());
-
-  par_scan_state.start_strong_roots();
-  heap->young_process_roots(_strong_roots_scope,
-                           &par_scan_state.to_space_root_closure(),
-                           &par_scan_state.older_gen_closure(),
-                           &cld_scan_closure);
-
-  par_scan_state.end_strong_roots();
-
-  // "evacuate followers".
-  par_scan_state.evacuate_followers_closure().do_void();
-
-  // This will collapse this worker's promoted object list that's
-  // created during the main parallel phase of ParNew. This has
-  // to be called after all workers have finished promoting objects
-  // and scanning promoted objects. It should be safe to call it from
-  // here, given that we can only reach here after all threads have
-  // offered termination, i.e., after there is no more work to be
-  // done. It will also disable promotion tracking for the rest of
-  // this GC as it's not necessary to be on during reference processing.
-  _old_gen->par_oop_since_save_marks_iterate_done((int) worker_id);
-}
-
-ParNewGeneration::ParNewGeneration(ReservedSpace rs,
-                                   size_t initial_byte_size,
-                                   size_t min_byte_size,
-                                   size_t max_byte_size)
-  : DefNewGeneration(rs, initial_byte_size, min_byte_size, max_byte_size, "CMS young collection pauses"),
-  _plab_stats("Young", YoungPLABSize, PLABWeight),
-  _overflow_list(NULL),
-  _is_alive_closure(this)
-{
-  NOT_PRODUCT(_overflow_counter = ParGCWorkQueueOverflowInterval;)
-  NOT_PRODUCT(_num_par_pushes = 0;)
-  _task_queues = new ObjToScanQueueSet(ParallelGCThreads);
-  guarantee(_task_queues != NULL, "task_queues allocation failure.");
-
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    ObjToScanQueue *q = new ObjToScanQueue();
-    guarantee(q != NULL, "work_queue Allocation failure.");
-    _task_queues->register_queue(i, q);
-  }
-
-  for (uint i = 0; i < ParallelGCThreads; i++) {
-    _task_queues->queue(i)->initialize();
-  }
-
-  _overflow_stacks = NULL;
-  if (ParGCUseLocalOverflow) {
-    // typedef to work around the NEW_C_HEAP_ARRAY macro, which cannot deal with ','
-    typedef Stack<oop, mtGC> GCOopStack;
-
-    _overflow_stacks = NEW_C_HEAP_ARRAY(GCOopStack, ParallelGCThreads, mtGC);
-    for (size_t i = 0; i < ParallelGCThreads; ++i) {
-      new (_overflow_stacks + i) Stack<oop, mtGC>();
-    }
-  }
-
-  if (UsePerfData) {
-    EXCEPTION_MARK;
-    ResourceMark rm;
-
-    const char* cname =
-         PerfDataManager::counter_name(_gen_counters->name_space(), "threads");
-    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_None,
-                                     ParallelGCThreads, CHECK);
-  }
-}
-
-// ParNewGeneration::
-ParKeepAliveClosure::ParKeepAliveClosure(ParScanWeakRefClosure* cl) :
-  DefNewGeneration::KeepAliveClosure(cl), _par_cl(cl) {}
-
-template <class T>
-void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
-#ifdef ASSERT
-  {
-    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-    // We never expect to see a null reference being processed
-    // as a weak reference.
-    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
-  }
-#endif // ASSERT
-
-  Devirtualizer::do_oop_no_verify(_par_cl, p);
-
-  if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-    _rs->write_ref_field_gc_par(p, obj);
-  }
-}
-
-void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(oop* p)       { ParKeepAliveClosure::do_oop_work(p); }
-void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop(narrowOop* p) { ParKeepAliveClosure::do_oop_work(p); }
-
-// ParNewGeneration::
-KeepAliveClosure::KeepAliveClosure(ScanWeakRefClosure* cl) :
-  DefNewGeneration::KeepAliveClosure(cl) {}
-
-template <class T>
-void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
-#ifdef ASSERT
-  {
-    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-    // We never expect to see a null reference being processed
-    // as a weak reference.
-    assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
-  }
-#endif // ASSERT
-
-  Devirtualizer::do_oop_no_verify(_cl, p);
-
-  if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-    _rs->write_ref_field_gc_par(p, obj);
-  }
-}
-
-void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p)       { KeepAliveClosure::do_oop_work(p); }
-void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
-
-template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
-  T heap_oop = RawAccess<>::oop_load(p);
-  if (!CompressedOops::is_null(heap_oop)) {
-    oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
-      assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
-      oop new_obj = obj->is_forwarded()
-                      ? obj->forwardee()
-                      : _g->DefNewGeneration::copy_to_survivor_space(obj);
-      RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
-    }
-    if (_gc_barrier) {
-      // If p points to a younger generation, mark the card.
-      if ((HeapWord*)obj < _gen_boundary) {
-        _rs->write_ref_field_gc_par(p, obj);
-      }
-    }
-  }
-}
-
-void ScanClosureWithParBarrier::do_oop(oop* p)       { ScanClosureWithParBarrier::do_oop_work(p); }
-void ScanClosureWithParBarrier::do_oop(narrowOop* p) { ScanClosureWithParBarrier::do_oop_work(p); }
-
-class ParNewRefProcTaskProxy: public AbstractGangTask {
-  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
-public:
-  ParNewRefProcTaskProxy(ProcessTask& task,
-                         ParNewGeneration& young_gen,
-                         Generation& old_gen,
-                         HeapWord* young_old_boundary,
-                         ParScanThreadStateSet& state_set);
-
-private:
-  virtual void work(uint worker_id);
-private:
-  ParNewGeneration&      _young_gen;
-  ProcessTask&           _task;
-  Generation&            _old_gen;
-  HeapWord*              _young_old_boundary;
-  ParScanThreadStateSet& _state_set;
-};
-
-ParNewRefProcTaskProxy::ParNewRefProcTaskProxy(ProcessTask& task,
-                                               ParNewGeneration& young_gen,
-                                               Generation& old_gen,
-                                               HeapWord* young_old_boundary,
-                                               ParScanThreadStateSet& state_set)
-  : AbstractGangTask("ParNewGeneration parallel reference processing"),
-    _young_gen(young_gen),
-    _task(task),
-    _old_gen(old_gen),
-    _young_old_boundary(young_old_boundary),
-    _state_set(state_set)
-{ }
-
-void ParNewRefProcTaskProxy::work(uint worker_id) {
-  ResourceMark rm;
-  HandleMark hm;
-  ParScanThreadState& par_scan_state = _state_set.thread_state(worker_id);
-  par_scan_state.set_young_old_boundary(_young_old_boundary);
-  _task.work(worker_id, par_scan_state.is_alive_closure(),
-             par_scan_state.keep_alive_closure(),
-             par_scan_state.evacuate_followers_closure());
-}
-
-void ParNewRefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers) {
-  CMSHeap* gch = CMSHeap::heap();
-  WorkGang* workers = gch->workers();
-  assert(workers != NULL, "Need parallel worker threads.");
-  assert(workers->active_workers() == ergo_workers,
-         "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
-         ergo_workers, workers->active_workers());
-  _state_set.reset(workers->active_workers(), _young_gen.promotion_failed());
-  ParNewRefProcTaskProxy rp_task(task, _young_gen, _old_gen,
-                                 _young_gen.reserved().end(), _state_set);
-  workers->run_task(&rp_task, workers->active_workers());
-  _state_set.reset(0 /* bad value in debug if not reset */,
-                   _young_gen.promotion_failed());
-}
-
-void ParNewRefProcTaskExecutor::set_single_threaded_mode() {
-  _state_set.flush();
-  CMSHeap* heap = CMSHeap::heap();
-  heap->save_marks();
-}
-
-ScanClosureWithParBarrier::
-ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier) :
-  OopsInClassLoaderDataOrGenClosure(g), _g(g), _boundary(g->reserved().end()), _gc_barrier(gc_barrier)
-{ }
-
-template <typename OopClosureType1, typename OopClosureType2>
-EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>::
-EvacuateFollowersClosureGeneral(CMSHeap* heap,
-                                OopClosureType1* cur,
-                                OopClosureType2* older) :
-  _heap(heap),
-  _scan_cur_or_nonheap(cur), _scan_older(older)
-{ }
-
-template <typename OopClosureType1, typename OopClosureType2>
-void EvacuateFollowersClosureGeneral<OopClosureType1, OopClosureType2>::do_void() {
-  do {
-    _heap->oop_since_save_marks_iterate(_scan_cur_or_nonheap,
-                                        _scan_older);
-  } while (!_heap->no_allocs_since_save_marks());
-}
-
-// A Generation that does parallel young-gen collection.
-
-void ParNewGeneration::handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set) {
-  assert(_promo_failure_scan_stack.is_empty(), "post condition");
-  _promo_failure_scan_stack.clear(true); // Clear cached segments.
-
-  remove_forwarding_pointers();
-  log_info(gc, promotion)("Promotion failed");
-  // All the spaces are in play for mark-sweep.
-  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
-  from()->set_next_compaction_space(to());
-  gch->set_incremental_collection_failed();
-  // Inform the next generation that a promotion failure occurred.
-  _old_gen->promotion_failure_occurred();
-
-  // Trace promotion failure in the parallel GC threads
-  thread_state_set.trace_promotion_failed(gc_tracer());
-  // Single threaded code may have reported promotion failure to the global state
-  if (_promotion_failed_info.has_failed()) {
-    _gc_tracer.report_promotion_failed(_promotion_failed_info);
-  }
-  // Reset the PromotionFailureALot counters.
-  NOT_PRODUCT(gch->reset_promotion_should_fail();)
-}
-
-void ParNewGeneration::collect(bool   full,
-                               bool   clear_all_soft_refs,
-                               size_t size,
-                               bool   is_tlab) {
-  assert(full || size > 0, "otherwise we don't want to collect");
-
-  CMSHeap* gch = CMSHeap::heap();
-
-  _gc_timer->register_gc_start();
-
-  AdaptiveSizePolicy* size_policy = gch->size_policy();
-  WorkGang* workers = gch->workers();
-  assert(workers != NULL, "Need workgang for parallel work");
-  uint active_workers =
-      WorkerPolicy::calc_active_workers(workers->total_workers(),
-                                        workers->active_workers(),
-                                        Threads::number_of_non_daemon_threads());
-  active_workers = workers->update_active_workers(active_workers);
-  log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers->total_workers());
-
-  _old_gen = gch->old_gen();
-
-  // If the next generation is too full to accommodate worst-case promotion
-  // from this generation, pass on collection; let the next generation
-  // do it.
-  if (!collection_attempt_is_safe()) {
-    gch->set_incremental_collection_failed();  // slight lie, in that we did not even attempt one
-    return;
-  }
-  assert(to()->is_empty(), "Else not collection_attempt_is_safe");
-
-  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
-  gch->trace_heap_before_gc(gc_tracer());
-
-  init_assuming_no_promotion_failure();
-
-  GCTraceTime(Trace, gc, phases) t1("ParNew", NULL, gch->gc_cause());
-
-  age_table()->clear();
-  to()->clear(SpaceDecorator::Mangle);
-
-  gch->save_marks();
-
-  // Set the correct parallelism (number of queues) in the reference processor
-  ref_processor()->set_active_mt_degree(active_workers);
-
-  // Need to initialize the preserved marks before the ThreadStateSet c'tor.
-  _preserved_marks_set.init(active_workers);
-
-  // Always set the terminator for the active number of workers
-  // because only those workers go through the termination protocol.
-  TaskTerminator _term(active_workers, task_queues());
-  ParScanThreadStateSet thread_state_set(active_workers,
-                                         *to(), *this, *_old_gen, *task_queues(),
-                                         _overflow_stacks, _preserved_marks_set,
-                                         desired_plab_sz(), _term);
-
-  thread_state_set.reset(active_workers, promotion_failed());
-
-  {
-    StrongRootsScope srs(active_workers);
-
-    ParNewGenTask tsk(this, _old_gen, reserved().end(), &thread_state_set, &srs);
-    gch->rem_set()->prepare_for_younger_refs_iterate(true);
-    // It turns out that even when we're using 1 thread, doing the work in a
-    // separate thread causes wide variance in run times.  We can't help this
-    // in the multi-threaded case, but we special-case n=1 here to get
-    // repeatable measurements of the 1-thread overhead of the parallel code.
-    // Might multiple workers ever be used?  If yes, initialization
-    // has been done such that the single threaded path should not be used.
-    if (workers->total_workers() > 1) {
-      workers->run_task(&tsk);
-    } else {
-      tsk.work(0);
-    }
-  }
-
-  thread_state_set.reset(0 /* Bad value in debug if not reset */,
-                         promotion_failed());
-
-  // Trace and reset failed promotion info.
-  if (promotion_failed()) {
-    thread_state_set.trace_promotion_failed(gc_tracer());
-  }
-
-  // Process (weak) reference objects found during scavenge.
-  ReferenceProcessor* rp = ref_processor();
-  IsAliveClosure is_alive(this);
-  ScanWeakRefClosure scan_weak_ref(this);
-  KeepAliveClosure keep_alive(&scan_weak_ref);
-  ScanClosure               scan_without_gc_barrier(this, false);
-  ScanClosureWithParBarrier scan_with_gc_barrier(this, true);
-  set_promo_failure_scan_stack_closure(&scan_without_gc_barrier);
-  EvacuateFollowersClosureGeneral<ScanClosure, ScanClosureWithParBarrier> evacuate_followers(
-      gch, &scan_without_gc_barrier, &scan_with_gc_barrier);
-  rp->setup_policy(clear_all_soft_refs);
-  // Can the mt_degree be set later (at run_task() time would be best)?
-  rp->set_active_mt_degree(active_workers);
-  ReferenceProcessorStats stats;
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
-  if (rp->processing_is_mt()) {
-    ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
-    stats = rp->process_discovered_references(&is_alive, &keep_alive,
-                                              &evacuate_followers, &task_executor,
-                                              &pt);
-  } else {
-    thread_state_set.flush();
-    gch->save_marks();
-    stats = rp->process_discovered_references(&is_alive, &keep_alive,
-                                              &evacuate_followers, NULL,
-                                              &pt);
-  }
-  _gc_tracer.report_gc_reference_stats(stats);
-  _gc_tracer.report_tenuring_threshold(tenuring_threshold());
-  pt.print_all_references();
-
-  assert(gch->no_allocs_since_save_marks(), "evacuation should be done at this point");
-
-  WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
-
-  // Verify that the usage of keep_alive only forwarded
-  // the oops and did not find anything new to copy.
-  assert(gch->no_allocs_since_save_marks(), "unexpectedly copied objects");
-
-  if (!promotion_failed()) {
-    // Swap the survivor spaces.
-    eden()->clear(SpaceDecorator::Mangle);
-    from()->clear(SpaceDecorator::Mangle);
-    if (ZapUnusedHeapArea) {
-      // This is now done here because of the piece-meal mangling which
-      // can check for valid mangling at intermediate points in the
-      // collection(s).  When a young collection fails to collect
-      // sufficient space resizing of the young generation can occur
-      // and redistribute the spaces in the young generation.  Mangle
-      // here so that unzapped regions don't get distributed to
-      // other spaces.
-      to()->mangle_unused_area();
-    }
-    swap_spaces();
-
-    // A successful scavenge should restart the GC time limit count which is
-    // for full GC's.
-    size_policy->reset_gc_overhead_limit_count();
-
-    assert(to()->is_empty(), "to space should be empty now");
-
-    adjust_desired_tenuring_threshold();
-  } else {
-    handle_promotion_failed(gch, thread_state_set);
-  }
-  _preserved_marks_set.reclaim();
-  // set new iteration safe limit for the survivor spaces
-  from()->set_concurrent_iteration_safe_limit(from()->top());
-  to()->set_concurrent_iteration_safe_limit(to()->top());
-
-  plab_stats()->adjust_desired_plab_sz();
-
-  TASKQUEUE_STATS_ONLY(thread_state_set.print_termination_stats());
-  TASKQUEUE_STATS_ONLY(thread_state_set.print_taskqueue_stats());
-
-  // We need to use a monotonically non-decreasing time in ms
-  // or we will see time-warp warnings and os::javaTimeMillis()
-  // does not guarantee monotonicity.
-  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
-  update_time_of_last_gc(now);
-
-  rp->set_enqueuing_is_done(true);
-  rp->verify_no_references_recorded();
-
-  gch->trace_heap_after_gc(gc_tracer());
-
-  _gc_timer->register_gc_end();
-
-  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
-}
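
For illustration, here is a minimal, self-contained sketch (not HotSpot code) of the n=1 special case in collect() above: dispatch to a gang of threads only when there is real parallelism, otherwise run the task inline in the caller. `Gang` and its `run_task` are hypothetical stand-ins for WorkGang, with std::thread in place of the VM's worker threads.

```cpp
#include <functional>
#include <thread>
#include <vector>

struct Gang {
  unsigned total_workers;

  void run_task(const std::function<void(unsigned)>& work) {
    if (total_workers > 1) {
      // workers->run_task(&tsk) analogue: fan out to the gang.
      std::vector<std::thread> threads;
      for (unsigned i = 0; i < total_workers; ++i) {
        threads.emplace_back(work, i);
      }
      for (auto& t : threads) {
        t.join();
      }
    } else {
      // tsk.work(0) analogue: run inline so the 1-thread overhead of the
      // parallel code stays repeatable and free of dispatch variance.
      work(0);
    }
  }
};
```
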
-
-size_t ParNewGeneration::desired_plab_sz() {
-  return _plab_stats.desired_plab_sz(CMSHeap::heap()->workers()->active_workers());
-}
-
-static int sum;
-void ParNewGeneration::waste_some_time() {
-  for (int i = 0; i < 100; i++) {
-    sum += i;
-  }
-}
-
-static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
-
-// Because of concurrency, there are times where an object for which
-// "is_forwarded()" is true contains an "interim" forwarding pointer
-// value.  Such a value will soon be overwritten with a real value.
-// This method requires "obj" to have a forwarding pointer, and waits, if
-// necessary, for a real one to be inserted, and returns it.
-
-oop ParNewGeneration::real_forwardee(oop obj) {
-  oop forward_ptr = obj->forwardee();
-  if (forward_ptr != ClaimedForwardPtr) {
-    return forward_ptr;
-  } else {
-    return real_forwardee_slow(obj);
-  }
-}
-
-oop ParNewGeneration::real_forwardee_slow(oop obj) {
-  // Spin-read if it is claimed but not yet written by another thread.
-  oop forward_ptr = obj->forwardee();
-  while (forward_ptr == ClaimedForwardPtr) {
-    waste_some_time();
-    assert(obj->is_forwarded(), "precondition");
-    forward_ptr = obj->forwardee();
-  }
-  return forward_ptr;
-}
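
The claim-then-publish protocol behind real_forwardee()/real_forwardee_slow() can be sketched in portable C++. This is not HotSpot code: `Obj`, `kClaimed`, and std::atomic are simplified stand-ins for the mark-word CAS, but the shape is the same — one thread claims with a sentinel, publishes the real forwardee later, and readers spin past the interim value.

```cpp
#include <atomic>
#include <cassert>
#include <thread>

struct Obj {
  std::atomic<Obj*> forwardee{nullptr};
};

// Stand-in for ClaimedForwardPtr: a non-null sentinel that is never a
// real object address.
Obj* const kClaimed = reinterpret_cast<Obj*>(0x4);

// Spin past the interim "claimed" value until the real forwardee appears.
Obj* real_forwardee(Obj* obj) {
  Obj* f = obj->forwardee.load(std::memory_order_acquire);
  while (f == kClaimed) {
    f = obj->forwardee.load(std::memory_order_acquire);
  }
  return f;
}

int main() {
  Obj old_obj, new_obj;
  std::thread winner([&] {
    // Claim first (forward_to_atomic analogue), then publish the real value.
    Obj* expected = nullptr;
    if (old_obj.forwardee.compare_exchange_strong(expected, kClaimed)) {
      old_obj.forwardee.store(&new_obj, std::memory_order_release);
    }
  });
  std::thread reader([&] {
    while (old_obj.forwardee.load(std::memory_order_acquire) == nullptr) {
      // not yet claimed; a real scanner would be doing other work
    }
    assert(real_forwardee(&old_obj) == &new_obj);
  });
  winner.join();
  reader.join();
  return 0;
}
```
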
-
-// Multiple GC threads may try to promote an object.  If the object
-// is successfully promoted, a forwarding pointer will be installed in
-// the object in the young generation.  This method claims the right
-// to install the forwarding pointer before it copies the object,
-// thus avoiding the need to undo the copy as in
-// copy_to_survivor_space_avoiding_with_undo.
-
-oop ParNewGeneration::copy_to_survivor_space(ParScanThreadState* par_scan_state,
-                                             oop old,
-                                             size_t sz,
-                                             markWord m) {
-  // In the sequential version, this assert also says that the object is
-  // not forwarded.  That might not be the case here.  It is the case that
-  // the caller observed it to be not forwarded at some time in the past.
-  assert(is_in_reserved(old), "shouldn't be scavenging this oop");
-
-  // The sequential code read "old->age()" below.  That doesn't work here,
-  // since the age is in the mark word, and that might be overwritten with
-  // a forwarding pointer by a parallel thread.  So we must save the mark
-  // word in a local and then analyze it.
-  oopDesc dummyOld;
-  dummyOld.set_mark_raw(m);
-  assert(!dummyOld.is_forwarded(),
-         "should not be called with forwarding pointer mark word.");
-
-  oop new_obj = NULL;
-  oop forward_ptr;
-
-  // Try allocating obj in to-space (unless too old)
-  if (dummyOld.age() < tenuring_threshold()) {
-    new_obj = (oop)par_scan_state->alloc_in_to_space(sz);
-  }
-
-  if (new_obj == NULL) {
-    // Either to-space is full or we decided to promote; try allocating obj tenured.
-
-    // Attempt to install a null forwarding pointer (atomically),
-    // to claim the right to install the real forwarding pointer.
-    forward_ptr = old->forward_to_atomic(ClaimedForwardPtr, m);
-    if (forward_ptr != NULL) {
-      // someone else beat us to it.
-      return real_forwardee(old);
-    }
-
-    if (!_promotion_failed) {
-      new_obj = _old_gen->par_promote(par_scan_state->thread_num(),
-                                      old, m, sz);
-    }
-
-    if (new_obj == NULL) {
-      // promotion failed, forward to self
-      _promotion_failed = true;
-      new_obj = old;
-
-      par_scan_state->preserved_marks()->push_if_necessary(old, m);
-      par_scan_state->register_promotion_failure(sz);
-    }
-
-    old->forward_to(new_obj);
-    forward_ptr = NULL;
-  } else {
-    // Is in to-space; do copying ourselves.
-    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)new_obj, sz);
-    assert(CMSHeap::heap()->is_in_reserved(new_obj), "illegal forwarding pointer value.");
-    forward_ptr = old->forward_to_atomic(new_obj, m);
-    // Restore the mark word copied above.
-    new_obj->set_mark_raw(m);
-    // Increment age if obj still in new generation
-    new_obj->incr_age();
-    par_scan_state->age_table()->add(new_obj, sz);
-  }
-  assert(new_obj != NULL, "just checking");
-
-  // This code must come after the CAS test, or it will print incorrect
-  // information.
-  log_develop_trace(gc, scavenge)("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-                                  is_in_reserved(new_obj) ? "copying" : "tenuring",
-                                  new_obj->klass()->internal_name(), p2i(old), p2i(new_obj), new_obj->size());
-
-  if (forward_ptr == NULL) {
-    oop obj_to_push = new_obj;
-    if (par_scan_state->should_be_partially_scanned(obj_to_push, old)) {
-      // Length field used as index of next element to be scanned.
-      // Real length can be obtained from real_forwardee()
-      arrayOop(old)->set_length(0);
-      obj_to_push = old;
-      assert(obj_to_push->is_forwarded() && obj_to_push->forwardee() != obj_to_push,
-             "push forwarded object");
-    }
-    // Push it on one of the queues of to-be-scanned objects.
-    bool simulate_overflow = false;
-    NOT_PRODUCT(
-      if (ParGCWorkQueueOverflowALot && should_simulate_overflow()) {
-        // simulate a stack overflow
-        simulate_overflow = true;
-      }
-    )
-    if (simulate_overflow || !par_scan_state->work_queue()->push(obj_to_push)) {
-      // Add stats for overflow pushes.
-      log_develop_trace(gc)("Queue Overflow");
-      push_on_overflow_list(old, par_scan_state);
-      TASKQUEUE_STATS_ONLY(par_scan_state->taskqueue_stats().record_overflow(0));
-    }
-
-    return new_obj;
-  }
-
-  // Oops.  Someone beat us to it.  Undo the allocation.  Where did we
-  // allocate it?
-  if (is_in_reserved(new_obj)) {
-    // Must be in to_space.
-    assert(to()->is_in_reserved(new_obj), "Checking");
-    if (forward_ptr == ClaimedForwardPtr) {
-      // Wait to get the real forwarding pointer value.
-      forward_ptr = real_forwardee(old);
-    }
-    par_scan_state->undo_alloc_in_to_space((HeapWord*)new_obj, sz);
-  }
-
-  return forward_ptr;
-}
-
-#ifndef PRODUCT
-// It's OK to call this multi-threaded;  the worst thing
-// that can happen is that we'll get a bunch of closely
-// spaced simulated overflows, but that's OK, in fact
-// probably good as it would exercise the overflow code
-// under contention.
-bool ParNewGeneration::should_simulate_overflow() {
-  if (_overflow_counter-- <= 0) { // just being defensive
-    _overflow_counter = ParGCWorkQueueOverflowInterval;
-    return true;
-  } else {
-    return false;
-  }
-}
-#endif
-
-// In case we are using compressed oops, we need to be careful.
-// If the object being pushed is an object array, then its length
-// field keeps track of the "grey boundary" at which the next
-// incremental scan will be done (see ParGCArrayScanChunk).
-// When using compressed oops, this length field is kept in the
-// lower 32 bits of the erstwhile klass word and cannot be used
-// for the overflow chaining pointer (OCP below). As such the OCP
-// would itself need to be compressed into the top 32-bits in this
-// case. Unfortunately, see below, in the event that we have a
-// promotion failure, the node to be pushed on the list can be
-// outside of the Java heap, so the heap-based pointer compression
-// would not work (we would have potential aliasing between C-heap
-// and Java-heap pointers). For this reason, when using compressed
-// oops, we simply use a worker-thread-local, non-shared overflow
-// list in the form of a growable array, with a slightly different
-// overflow stack draining strategy. If/when we start using fat
-// stacks here, we can go back to using (fat) pointer chains
-// (although some performance comparisons would be useful since
-// single global lists have their own performance disadvantages
-// as we were made painfully aware not long ago, see 6786503).
-#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
-void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
-  assert(is_in_reserved(from_space_obj), "Should be from this generation");
-  if (ParGCUseLocalOverflow) {
-    // In the case of compressed oops, we use a private, not-shared
-    // overflow stack.
-    par_scan_state->push_on_overflow_stack(from_space_obj);
-  } else {
-    assert(!UseCompressedOops, "Error");
-    // if the object has been forwarded to itself, then we cannot
-    // use the klass pointer for the linked list.  Instead we have
-    // to allocate an oopDesc in the C-Heap and use that for the linked list.
-    // XXX This is horribly inefficient when a promotion failure occurs
-    // and should be fixed. XXX FIX ME !!!
-#ifndef PRODUCT
-    Atomic::inc(&_num_par_pushes);
-    assert(_num_par_pushes > 0, "Tautology");
-#endif
-    if (from_space_obj->forwardee() == from_space_obj) {
-      oopDesc* listhead = NEW_C_HEAP_OBJ(oopDesc, mtGC);
-      listhead->forward_to(from_space_obj);
-      from_space_obj = listhead;
-    }
-    oop observed_overflow_list = _overflow_list;
-    oop cur_overflow_list;
-    do {
-      cur_overflow_list = observed_overflow_list;
-      if (cur_overflow_list != BUSY) {
-        from_space_obj->set_klass_to_list_ptr(cur_overflow_list);
-      } else {
-        from_space_obj->set_klass_to_list_ptr(NULL);
-      }
-      observed_overflow_list =
-        Atomic::cmpxchg((oopDesc*)from_space_obj, &_overflow_list, (oopDesc*)cur_overflow_list);
-    } while (cur_overflow_list != observed_overflow_list);
-  }
-}
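
The CAS loop above is essentially a Treiber-stack push complicated by the BUSY sentinel. A minimal sketch, not HotSpot code, with std::atomic standing in for Atomic::cmpxchg and an ordinary `next` field standing in for the klass-word threading:

```cpp
#include <atomic>

struct Node {
  Node* next = nullptr;   // stand-in for set_klass_to_list_ptr() threading
};

// Stand-in for BUSY: a sentinel a taker installs while it owns the list.
Node* const kBusy = reinterpret_cast<Node*>(0x1aff1aff);

std::atomic<Node*> overflow_list{nullptr};

void push_on_overflow_list(Node* n) {
  Node* observed = overflow_list.load();
  do {
    // Never thread onto the BUSY marker; treat it as an empty list.
    n->next = (observed != kBusy) ? observed : nullptr;
    // On failure, compare_exchange_weak reloads `observed` and we retry,
    // mirroring the cmpxchg loop above.
  } while (!overflow_list.compare_exchange_weak(observed, n));
}
```
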
-
-bool ParNewGeneration::take_from_overflow_list(ParScanThreadState* par_scan_state) {
-  bool res;
-
-  if (ParGCUseLocalOverflow) {
-    res = par_scan_state->take_from_overflow_stack();
-  } else {
-    assert(!UseCompressedOops, "Error");
-    res = take_from_overflow_list_work(par_scan_state);
-  }
-  return res;
-}
-
-
-// *NOTE*: The overflow list manipulation code here and
-// in CMSCollector:: are very similar in shape,
-// except that in the CMS case we thread the objects
-// directly into the list via their mark word, and do
-// not need to deal with special cases below related
-// to chunking of object arrays and promotion failure
-// handling.
-// CR 6797058 has been filed to attempt consolidation of
-// the common code.
-// Because of the common code, if you make any changes in
-// the code below, please check the CMS version to see if
-// similar changes might be needed.
-// See CMSCollector::par_take_from_overflow_list() for
-// more extensive documentation comments.
-bool ParNewGeneration::take_from_overflow_list_work(ParScanThreadState* par_scan_state) {
-  ObjToScanQueue* work_q = par_scan_state->work_queue();
-  // How many to take?
-  size_t objsFromOverflow = MIN2((size_t)(work_q->max_elems() - work_q->size())/4,
-                                 (size_t)ParGCDesiredObjsFromOverflowList);
-
-  assert(!UseCompressedOops, "Error");
-  assert(par_scan_state->overflow_stack() == NULL, "Error");
-  if (_overflow_list == NULL) return false;
-
-  // Otherwise, there was something there; try claiming the list.
-  oop prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
-  // Trim off a prefix of at most objsFromOverflow items
-  size_t spin_count = ParallelGCThreads;
-  size_t sleep_time_millis = MAX2((size_t)1, objsFromOverflow/100);
-  for (size_t spin = 0; prefix == BUSY && spin < spin_count; spin++) {
-    // someone grabbed it before we did ...
-    // ... we spin/block for a short while...
-    os::naked_sleep(sleep_time_millis);
-    if (_overflow_list == NULL) {
-      // nothing left to take
-      return false;
-    } else if (_overflow_list != BUSY) {
-      // try and grab the prefix
-      prefix = cast_to_oop(Atomic::xchg((oopDesc*)BUSY, &_overflow_list));
-    }
-  }
-  if (prefix == NULL || prefix == BUSY) {
-    // Nothing to take or waited long enough
-    if (prefix == NULL) {
-      // Write back the NULL in case we overwrote it with BUSY above
-      // and it is still the same value.
-      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
-    }
-    return false;
-  }
-  assert(prefix != NULL && prefix != BUSY, "Error");
-  oop cur = prefix;
-  for (size_t i = 1; i < objsFromOverflow; ++i) {
-    oop next = cur->list_ptr_from_klass();
-    if (next == NULL) break;
-    cur = next;
-  }
-  assert(cur != NULL, "Loop postcondition");
-
-  // Reattach remaining (suffix) to overflow list
-  oop suffix = cur->list_ptr_from_klass();
-  if (suffix == NULL) {
-    // Write back the NULL in lieu of the BUSY we wrote
-    // above, if it is still the same value.
-    if (_overflow_list == BUSY) {
-      (void) Atomic::cmpxchg((oopDesc*)NULL, &_overflow_list, (oopDesc*)BUSY);
-    }
-  } else {
-    assert(suffix != BUSY, "Error");
-    // suffix will be put back on global list
-    cur->set_klass_to_list_ptr(NULL);     // break off suffix
-    // It's possible that the list is still in the empty (BUSY) state
-    // we left it in a short while ago; in that case we may be
-    // able to place back the suffix.
-    oop observed_overflow_list = _overflow_list;
-    oop cur_overflow_list = observed_overflow_list;
-    bool attached = false;
-    while (observed_overflow_list == BUSY || observed_overflow_list == NULL) {
-      observed_overflow_list =
-        Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
-      if (cur_overflow_list == observed_overflow_list) {
-        attached = true;
-        break;
-      } else cur_overflow_list = observed_overflow_list;
-    }
-    if (!attached) {
-      // Too bad, someone else got in between; we'll need to do a splice.
-      // Find the last item of suffix list
-      oop last = suffix;
-      while (true) {
-        oop next = last->list_ptr_from_klass();
-        if (next == NULL) break;
-        last = next;
-      }
-      // Atomically prepend suffix to current overflow list
-      observed_overflow_list = _overflow_list;
-      do {
-        cur_overflow_list = observed_overflow_list;
-        if (cur_overflow_list != BUSY) {
-          // Do the splice ...
-          last->set_klass_to_list_ptr(cur_overflow_list);
-        } else { // cur_overflow_list == BUSY
-          last->set_klass_to_list_ptr(NULL);
-        }
-        observed_overflow_list =
-          Atomic::cmpxchg((oopDesc*)suffix, &_overflow_list, (oopDesc*)cur_overflow_list);
-      } while (cur_overflow_list != observed_overflow_list);
-    }
-  }
-
-  // Push objects on prefix list onto this thread's work queue
-  assert(prefix != NULL && prefix != BUSY, "program logic");
-  cur = prefix;
-  ssize_t n = 0;
-  while (cur != NULL) {
-    oop obj_to_push = cur->forwardee();
-    oop next        = cur->list_ptr_from_klass();
-    cur->set_klass(obj_to_push->klass());
-    // This may be an array object that is self-forwarded. In that case, the list pointer
-    // space, cur, is not in the Java heap, but rather in the C-heap and should be freed.
-    if (!is_in_reserved(cur)) {
-      // This can become a scaling bottleneck when there is work queue overflow coincident
-      // with promotion failure.
-      oopDesc* f = cur;
-      FREE_C_HEAP_OBJ(f);
-    } else if (par_scan_state->should_be_partially_scanned(obj_to_push, cur)) {
-      assert(arrayOop(cur)->length() == 0, "entire array remaining to be scanned");
-      obj_to_push = cur;
-    }
-    bool ok = work_q->push(obj_to_push);
-    assert(ok, "Should have succeeded");
-    cur = next;
-    n++;
-  }
-  TASKQUEUE_STATS_ONLY(par_scan_state->note_overflow_refill(n));
-#ifndef PRODUCT
-  assert(_num_par_pushes >= n, "Too many pops?");
-  Atomic::sub(n, &_num_par_pushes);
-#endif
-  return true;
-}
-#undef BUSY
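
Continuing the sketch after push_on_overflow_list() above (same `Node`, `kBusy`, and `overflow_list`), the take side can be outlined as: claim the whole list with an exchange, detach a bounded prefix, and publish the suffix back, splicing it on if pushers got in meanwhile. This is a simplified, hypothetical rendering; the real code also spins while another taker holds the list and handles the C-heap nodes left by promotion failure.

```cpp
#include <cstddef>  // size_t; Node, kBusy, overflow_list as defined earlier

Node* take_prefix(size_t max_items) {
  Node* head = overflow_list.exchange(kBusy);      // claim the entire list
  if (head == nullptr || head == kBusy) {
    // Nothing to take, or another taker owns it: restore nullptr in case
    // we overwrote it with BUSY and it is still the same value.
    Node* expected = kBusy;
    overflow_list.compare_exchange_strong(expected, nullptr);
    return nullptr;
  }
  // Walk at most max_items nodes; cur ends on the last node of the prefix.
  Node* cur = head;
  for (size_t i = 1; i < max_items && cur->next != nullptr; ++i) {
    cur = cur->next;
  }
  Node* suffix = cur->next;
  cur->next = nullptr;                             // break off the prefix
  if (suffix == nullptr) {
    // Write back nullptr in lieu of the BUSY we installed, if still there.
    Node* expected = kBusy;
    overflow_list.compare_exchange_strong(expected, nullptr);
  } else {
    // Splice: prepend the whole suffix back onto the global list.
    Node* last = suffix;
    while (last->next != nullptr) {
      last = last->next;
    }
    Node* observed = overflow_list.load();
    do {
      last->next = (observed != kBusy) ? observed : nullptr;
    } while (!overflow_list.compare_exchange_weak(observed, suffix));
  }
  return head;
}
```
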
-
-void ParNewGeneration::ref_processor_init() {
-  if (_ref_processor == NULL) {
-    // Allocate and initialize a reference processor
-    _span_based_discoverer.set_span(_reserved);
-    _ref_processor =
-      new ReferenceProcessor(&_span_based_discoverer,    // span
-                             ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
-                             ParallelGCThreads,          // mt processing degree
-                             refs_discovery_is_mt(),     // mt discovery
-                             ParallelGCThreads,          // mt discovery degree
-                             refs_discovery_is_atomic(), // atomic_discovery
-                             NULL,                       // is_alive_non_header
-                             false);                     // disable adjusting number of processing threads
-  }
-}
-
-const char* ParNewGeneration::name() const {
-  return "par new generation";
-}
-
-void ParNewGeneration::restore_preserved_marks() {
-  SharedRestorePreservedMarksTaskExecutor task_executor(CMSHeap::heap()->workers());
-  _preserved_marks_set.restore(&task_executor);
-}
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,420 +0,0 @@
-/*
- * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PARNEWGENERATION_HPP
-#define SHARE_GC_CMS_PARNEWGENERATION_HPP
-
-#include "gc/cms/parOopClosures.hpp"
-#include "gc/serial/defNewGeneration.hpp"
-#include "gc/shared/copyFailedInfo.hpp"
-#include "gc/shared/gcTrace.hpp"
-#include "gc/shared/oopStorageParState.hpp"
-#include "gc/shared/plab.hpp"
-#include "gc/shared/preservedMarks.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "memory/padded.hpp"
-
-class ChunkArray;
-class CMSHeap;
-class ParScanWithoutBarrierClosure;
-class ParScanWithBarrierClosure;
-class ParRootScanWithoutBarrierClosure;
-class ParRootScanWithBarrierTwoGensClosure;
-class ParEvacuateFollowersClosure;
-class StrongRootsScope;
-
-// It would be better if these types could be kept local to the .cpp file,
-// but they must be here to allow ParScanClosure::do_oop_work to be defined
-// in genOopClosures.inline.hpp.
-
-typedef Padded<OopTaskQueue> ObjToScanQueue;
-typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
-
-class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
- private:
-  ParScanWeakRefClosure* _par_cl;
- protected:
-  template <class T> void do_oop_work(T* p);
- public:
-  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// The state needed by thread performing parallel young-gen collection.
-class ParScanThreadState {
-  friend class ParScanThreadStateSet;
- private:
-  ObjToScanQueue *_work_queue;
-  Stack<oop, mtGC>* const _overflow_stack;
-  PreservedMarks* const _preserved_marks;
-
-  PLAB _to_space_alloc_buffer;
-
-  ParScanWithoutBarrierClosure         _to_space_closure; // scan_without_gc_barrier
-  ParScanWithBarrierClosure            _old_gen_closure; // scan_with_gc_barrier
-  ParRootScanWithoutBarrierClosure     _to_space_root_closure; // scan_root_without_gc_barrier
-  // Will be passed to process_roots to set its generation.
-  ParRootScanWithBarrierTwoGensClosure _older_gen_closure;
-  // This closure will always be bound to the old gen; it will be used
-  // in evacuate_followers.
-  ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier
-  ParEvacuateFollowersClosure          _evacuate_followers;
-  DefNewGeneration::IsAliveClosure     _is_alive_closure;
-  ParScanWeakRefClosure                _scan_weak_ref_closure;
-  ParKeepAliveClosure                  _keep_alive_closure;
-
-  Space* _to_space;
-  Space* to_space() { return _to_space; }
-
-  ParNewGeneration* _young_gen;
-  ParNewGeneration* young_gen() const { return _young_gen; }
-
-  Generation* _old_gen;
-  Generation* old_gen() { return _old_gen; }
-
-  HeapWord *_young_old_boundary;
-
-  int _thread_num;
-  AgeTable _ageTable;
-
-  bool _to_space_full;
-
-#if TASKQUEUE_STATS
-  size_t _term_attempts;
-  size_t _overflow_refills;
-  size_t _overflow_refill_objs;
-#endif // TASKQUEUE_STATS
-
-  // Stats for promotion failure
-  PromotionFailedInfo _promotion_failed_info;
-
-  // Timing numbers.
-  double _start;
-  double _start_strong_roots;
-  double _strong_roots_time;
-  double _start_term;
-  double _term_time;
-
-  // Helper for trim_queues. Scans subset of an array and makes
-  // remainder available for work stealing.
-  void scan_partial_array_and_push_remainder(oop obj);
-
-  // In support of CMS' parallel rescan of survivor space.
-  ChunkArray* _survivor_chunk_array;
-  ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; }
-
-  void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size);
-
-  ParScanThreadState(Space* to_space_, ParNewGeneration* gen_,
-                     Generation* old_gen_, int thread_num_,
-                     ObjToScanQueueSet* work_queue_set_,
-                     Stack<oop, mtGC>* overflow_stacks_,
-                     PreservedMarks* preserved_marks_,
-                     size_t desired_plab_sz_,
-                     TaskTerminator& term_);
-
- public:
-  AgeTable* age_table() { return &_ageTable; }
-
-  ObjToScanQueue* work_queue() { return _work_queue; }
-
-  PreservedMarks* preserved_marks() const { return _preserved_marks; }
-
-  PLAB* to_space_alloc_buffer() {
-    return &_to_space_alloc_buffer;
-  }
-
-  ParEvacuateFollowersClosure&      evacuate_followers_closure() { return _evacuate_followers; }
-  DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; }
-  ParScanWeakRefClosure&            scan_weak_ref_closure() { return _scan_weak_ref_closure; }
-  ParKeepAliveClosure&              keep_alive_closure() { return _keep_alive_closure; }
-  ParScanClosure&                   older_gen_closure() { return _older_gen_closure; }
-  ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }
-
-  // Decrease queue size below "max_size".
-  void trim_queues(int max_size);
-
-  // Private overflow stack usage
-  Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; }
-  bool take_from_overflow_stack();
-  void push_on_overflow_stack(oop p);
-
-  // Is new_obj a candidate for the scan_partial_array_and_push_remainder method?
-  inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const;
-
-  int  thread_num() { return _thread_num; }
-
-  // Allocate a to-space block of size "sz", or else return NULL.
-  HeapWord* alloc_in_to_space_slow(size_t word_sz);
-
-  inline HeapWord* alloc_in_to_space(size_t word_sz);
-
-  HeapWord* young_old_boundary() { return _young_old_boundary; }
-
-  void set_young_old_boundary(HeapWord *boundary) {
-    _young_old_boundary = boundary;
-  }
-
-  // Undo the most recent allocation ("obj", of "word_sz").
-  void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
-
-  // Promotion failure stats
-  void register_promotion_failure(size_t sz) {
-    _promotion_failed_info.register_copy_failure(sz);
-  }
-  PromotionFailedInfo& promotion_failed_info() {
-    return _promotion_failed_info;
-  }
-  bool promotion_failed() {
-    return _promotion_failed_info.has_failed();
-  }
-  void print_promotion_failure_size();
-
-#if TASKQUEUE_STATS
-  TaskQueueStats& taskqueue_stats() const { return _work_queue->stats; }
-
-  size_t term_attempts() const             { return _term_attempts; }
-  size_t overflow_refills() const          { return _overflow_refills; }
-  size_t overflow_refill_objs() const      { return _overflow_refill_objs; }
-
-  void note_term_attempt()                 { ++_term_attempts; }
-  void note_overflow_refill(size_t objs)   {
-    ++_overflow_refills; _overflow_refill_objs += objs;
-  }
-
-  void reset_stats();
-#endif // TASKQUEUE_STATS
-
-  void start_strong_roots() {
-    _start_strong_roots = os::elapsedTime();
-  }
-  void end_strong_roots() {
-    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
-  }
-  double strong_roots_time() const { return _strong_roots_time; }
-  void start_term_time() {
-    TASKQUEUE_STATS_ONLY(note_term_attempt());
-    _start_term = os::elapsedTime();
-  }
-  void end_term_time() {
-    _term_time += (os::elapsedTime() - _start_term);
-  }
-  double term_time() const { return _term_time; }
-
-  double elapsed_time() const {
-    return os::elapsedTime() - _start;
-  }
-};
-
-class ParNewGenTask: public AbstractGangTask {
- private:
-  ParNewGeneration*            _young_gen;
-  Generation*                  _old_gen;
-  HeapWord*                    _young_old_boundary;
-  class ParScanThreadStateSet* _state_set;
-  StrongRootsScope*            _strong_roots_scope;
-
-public:
-  ParNewGenTask(ParNewGeneration*      young_gen,
-                Generation*            old_gen,
-                HeapWord*              young_old_boundary,
-                ParScanThreadStateSet* state_set,
-                StrongRootsScope*      strong_roots_scope);
-
-  HeapWord* young_old_boundary() { return _young_old_boundary; }
-
-  void work(uint worker_id);
-};
-
-class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
- protected:
-  template <class T> void do_oop_work(T* p);
- public:
-  KeepAliveClosure(ScanWeakRefClosure* cl);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-template <typename OopClosureType1, typename OopClosureType2>
-class EvacuateFollowersClosureGeneral: public VoidClosure {
- private:
-  CMSHeap* _heap;
-  OopClosureType1* _scan_cur_or_nonheap;
-  OopClosureType2* _scan_older;
- public:
-  EvacuateFollowersClosureGeneral(CMSHeap* heap,
-                                  OopClosureType1* cur,
-                                  OopClosureType2* older);
-  virtual void do_void();
-};
-
-// Closure for scanning ParNewGeneration.
-// Same as ScanClosure, except does parallel GC barrier.
-class ScanClosureWithParBarrier: public OopsInClassLoaderDataOrGenClosure {
- private:
-  ParNewGeneration* _g;
-  HeapWord*         _boundary;
-  bool              _gc_barrier;
-
-  template <class T> void do_oop_work(T* p);
-
- public:
-  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-// Implements AbstractRefProcTaskExecutor for ParNew.
-class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
- private:
-  ParNewGeneration&      _young_gen;
-  Generation&            _old_gen;
-  ParScanThreadStateSet& _state_set;
- public:
-  ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
-                            Generation& old_gen,
-                            ParScanThreadStateSet& state_set)
-    : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
-  { }
-
-  // Executes a task using worker threads.
-  virtual void execute(ProcessTask& task, uint ergo_workers);
-  // Switch to single threaded mode.
-  virtual void set_single_threaded_mode();
-};
-
-
-// A Generation that does parallel young-gen collection.
-
-class ParNewGeneration: public DefNewGeneration {
-  friend class ParNewGenTask;
-  friend class ParNewRefProcTask;
-  friend class ParNewRefProcTaskExecutor;
-  friend class ParScanThreadStateSet;
-  friend class ParEvacuateFollowersClosure;
-
- private:
-  // The per-worker-thread work queues
-  ObjToScanQueueSet* _task_queues;
-
-  // Per-worker-thread local overflow stacks
-  Stack<oop, mtGC>* _overflow_stacks;
-
-  // Desired size of survivor space plab's
-  PLABStats _plab_stats;
-
-  // A list of from-space images of to-be-scanned objects, threaded through
-  // klass-pointers (klass information already copied to the forwarded
-  // image.)  Manipulated with CAS.
-  oopDesc* volatile _overflow_list;
-  NOT_PRODUCT(ssize_t _num_par_pushes;)
-
-  // This closure is used by the reference processor to filter out
-  // references to live referents.
-  DefNewGeneration::IsAliveClosure _is_alive_closure;
-
-  // GC tracer that should be used during collection.
-  ParNewTracer _gc_tracer;
-
-  static oop real_forwardee_slow(oop obj);
-  static void waste_some_time();
-
-  void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);
-
- protected:
-
-  void restore_preserved_marks();
-
- public:
-  ParNewGeneration(ReservedSpace rs,
-                   size_t initial_byte_size,
-                   size_t min_byte_size,
-                   size_t max_byte_size);
-
-  ~ParNewGeneration() {
-    for (uint i = 0; i < ParallelGCThreads; i++)
-      delete _task_queues->queue(i);
-
-    delete _task_queues;
-  }
-
-  virtual void ref_processor_init();
-  virtual Generation::Name kind()        { return Generation::ParNew; }
-  virtual const char* name() const;
-  virtual const char* short_name() const { return "ParNew"; }
-
-  // override
-  virtual bool refs_discovery_is_mt()     const {
-    return ParallelGCThreads > 1;
-  }
-
-  // Make the collection virtual.
-  virtual void collect(bool   full,
-                       bool   clear_all_soft_refs,
-                       size_t size,
-                       bool   is_tlab);
-
-  // This needs to be visible to the closure function.
-  // "obj" is the object to be copied, "m" is a recent value of its mark
-  // that must not contain a forwarding pointer (though one might be
-  // inserted in "obj"s mark word by a parallel thread).
-  oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
-                             oop obj, size_t obj_sz, markWord m);
-
-  // in support of testing overflow code
-  NOT_PRODUCT(int _overflow_counter;)
-  NOT_PRODUCT(bool should_simulate_overflow();)
-
-  // Accessor for overflow list
-  oop overflow_list() { return _overflow_list; }
-
-  // Push the given (from-space) object on the global overflow list.
-  void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);
-
-  // If the global overflow list is non-empty, move some tasks from it
-  // onto "work_q" (which need not be empty).  No more than 1/4 of the
-  // available space on "work_q" is used.
-  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
-  bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);
-
-  // The task queues to be used by parallel GC threads.
-  ObjToScanQueueSet* task_queues() {
-    return _task_queues;
-  }
-
-  PLABStats* plab_stats() {
-    return &_plab_stats;
-  }
-
-  size_t desired_plab_sz();
-
-  const ParNewTracer* gc_tracer() const {
-    return &_gc_tracer;
-  }
-
-  static oop real_forwardee(oop obj);
-};
-
-#endif // SHARE_GC_CMS_PARNEWGENERATION_HPP
--- a/src/hotspot/share/gc/cms/parNewGeneration.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PARNEWGENERATION_INLINE_HPP
-#define SHARE_GC_CMS_PARNEWGENERATION_INLINE_HPP
-
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/shared/plab.inline.hpp"
-#include "utilities/globalDefinitions.hpp"
-
-inline HeapWord* ParScanThreadState::alloc_in_to_space(size_t word_sz) {
-  HeapWord* obj = to_space_alloc_buffer()->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
-  if (obj != NULL) return obj;
-  else return alloc_in_to_space_slow(word_sz);
-}
-#endif // SHARE_GC_CMS_PARNEWGENERATION_INLINE_HPP
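
The fast/slow split in alloc_in_to_space() above is the classic PLAB pattern: bump-pointer allocation in a thread-local buffer, with a slower refill path only on exhaustion. A self-contained sketch under assumed names (`Plab`, `alloc_slow`, a static arena standing in for to-space), not HotSpot code:

```cpp
#include <cstddef>
#include <cstdint>

struct Plab {
  uintptr_t* top;
  uintptr_t* end;
  uintptr_t* allocate(size_t words) {   // fast, uncontended bump pointer
    if (top + words <= end) {
      uintptr_t* obj = top;
      top += words;
      return obj;
    }
    return nullptr;                     // buffer exhausted
  }
};

// Stand-in for alloc_in_to_space_slow(): here it just refills the PLAB
// from a static arena; the real slow path may also allocate directly.
static uintptr_t arena[1 << 16];
static size_t arena_top = 0;

uintptr_t* alloc_slow(Plab& plab, size_t words) {
  const size_t refill = 1 << 10;
  if (arena_top + refill > sizeof(arena) / sizeof(arena[0])) {
    return nullptr;                     // arena (to-space) exhausted
  }
  plab.top = &arena[arena_top];
  plab.end = plab.top + refill;
  arena_top += refill;
  return plab.allocate(words);
}

inline uintptr_t* alloc_in_to_space(Plab& plab, size_t words) {
  uintptr_t* obj = plab.allocate(words);
  return (obj != nullptr) ? obj : alloc_slow(plab, words);
}
```
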
--- a/src/hotspot/share/gc/cms/parOopClosures.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PAROOPCLOSURES_HPP
-#define SHARE_GC_CMS_PAROOPCLOSURES_HPP
-
-#include "gc/shared/genOopClosures.hpp"
-#include "gc/shared/taskqueue.hpp"
-#include "memory/padded.hpp"
-
-// Closures for ParNewGeneration
-
-class ParScanThreadState;
-class ParNewGeneration;
-typedef Padded<OopTaskQueue> ObjToScanQueue;
-typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
-class ParallelTaskTerminator;
-
-class ParScanClosure: public OopsInClassLoaderDataOrGenClosure {
- protected:
-  ParScanThreadState* _par_scan_state;
-  ParNewGeneration*   _g;
-  HeapWord*           _boundary;
-  template <class T> void inline par_do_barrier(T* p);
-  template <class T> void inline do_oop_work(T* p,
-                                             bool gc_barrier,
-                                             bool root_scan);
- public:
-  ParScanClosure(ParNewGeneration* g, ParScanThreadState* par_scan_state);
-};
-
-class ParScanWithBarrierClosure: public ParScanClosure {
- public:
-  ParScanWithBarrierClosure(ParNewGeneration* g,
-                            ParScanThreadState* par_scan_state) :
-    ParScanClosure(g, par_scan_state) {}
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class ParScanWithoutBarrierClosure: public ParScanClosure {
- public:
-  ParScanWithoutBarrierClosure(ParNewGeneration* g,
-                               ParScanThreadState* par_scan_state) :
-    ParScanClosure(g, par_scan_state) {}
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class ParRootScanWithBarrierTwoGensClosure: public ParScanClosure {
- public:
-  ParRootScanWithBarrierTwoGensClosure(ParNewGeneration* g,
-                                       ParScanThreadState* par_scan_state) :
-    ParScanClosure(g, par_scan_state) {}
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class ParRootScanWithoutBarrierClosure: public ParScanClosure {
- public:
-  ParRootScanWithoutBarrierClosure(ParNewGeneration* g,
-                                   ParScanThreadState* par_scan_state) :
-    ParScanClosure(g, par_scan_state) {}
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class ParScanWeakRefClosure: public ScanWeakRefClosure {
- protected:
-  ParScanThreadState* _par_scan_state;
-  template <class T> inline void do_oop_work(T* p);
- public:
-  ParScanWeakRefClosure(ParNewGeneration* g,
-                        ParScanThreadState* par_scan_state);
-  virtual void do_oop(oop* p);
-  virtual void do_oop(narrowOop* p);
-};
-
-class ParEvacuateFollowersClosure: public VoidClosure {
- private:
-  ParScanThreadState* _par_scan_state;
-  ParScanThreadState* par_scan_state() { return _par_scan_state; }
-
-  // We want to preserve the specific types here (rather than "OopClosure")
-  // for later de-virtualization of do_oop calls.
-  ParScanWithoutBarrierClosure* _to_space_closure;
-  ParScanWithoutBarrierClosure* to_space_closure() {
-    return _to_space_closure;
-  }
-  ParRootScanWithoutBarrierClosure* _to_space_root_closure;
-  ParRootScanWithoutBarrierClosure* to_space_root_closure() {
-    return _to_space_root_closure;
-  }
-
-  ParScanWithBarrierClosure* _old_gen_closure;
-  ParScanWithBarrierClosure* old_gen_closure() {
-    return _old_gen_closure;
-  }
-  ParRootScanWithBarrierTwoGensClosure* _old_gen_root_closure;
-  ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure() {
-    return _old_gen_root_closure;
-  }
-
-  ParNewGeneration* _par_gen;
-  ParNewGeneration* par_gen() { return _par_gen; }
-
-  ObjToScanQueueSet*  _task_queues;
-  ObjToScanQueueSet*  task_queues() { return _task_queues; }
-
-  ParallelTaskTerminator* _terminator;
-  ParallelTaskTerminator* terminator() { return _terminator; }
- public:
-  ParEvacuateFollowersClosure(
-    ParScanThreadState* par_scan_state_,
-    ParScanWithoutBarrierClosure* to_space_closure_,
-    ParScanWithBarrierClosure* old_gen_closure_,
-    ParRootScanWithoutBarrierClosure* to_space_root_closure_,
-    ParNewGeneration* par_gen_,
-    ParRootScanWithBarrierTwoGensClosure* old_gen_root_closure_,
-    ObjToScanQueueSet* task_queues_,
-    ParallelTaskTerminator* terminator_);
-  virtual void do_void();
-};
-
-#endif // SHARE_GC_CMS_PAROOPCLOSURES_HPP
--- a/src/hotspot/share/gc/cms/parOopClosures.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PAROOPCLOSURES_INLINE_HPP
-#define SHARE_GC_CMS_PAROOPCLOSURES_INLINE_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#include "gc/cms/parOopClosures.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/genOopClosures.inline.hpp"
-#include "logging/log.hpp"
-#include "logging/logStream.hpp"
-#include "oops/access.inline.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/oop.inline.hpp"
-
-template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
-  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-  // weak references are sometimes scanned twice; must check
-  // that to-space doesn't already contain this object
-  if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
-    // we need to ensure that it is copied (see comment in
-    // ParScanClosure::do_oop_work).
-    Klass* objK = obj->klass();
-    markWord m = obj->mark_raw();
-    oop new_obj;
-    if (m.is_marked()) { // Contains forwarding pointer.
-      new_obj = ParNewGeneration::real_forwardee(obj);
-    } else {
-      size_t obj_sz = obj->size_given_klass(objK);
-      new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
-                                                                obj, obj_sz, m);
-    }
-    RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
-  }
-}
-
-inline void ParScanWeakRefClosure::do_oop(oop* p)       { ParScanWeakRefClosure::do_oop_work(p); }
-inline void ParScanWeakRefClosure::do_oop(narrowOop* p) { ParScanWeakRefClosure::do_oop_work(p); }
-
-template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
-  assert(generation()->is_in_reserved(p), "expected ref in generation");
-  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
-  // If p points to a younger generation, mark the card.
-  if ((HeapWord*)obj < gen_boundary()) {
-    rs()->write_ref_field_gc_par(p, obj);
-  }
-}
-
-template <class T>
-inline void ParScanClosure::do_oop_work(T* p,
-                                        bool gc_barrier,
-                                        bool root_scan) {
-  assert((!CMSHeap::heap()->is_in_reserved(p) ||
-          generation()->is_in_reserved(p))
-         && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
-         "The gen must be right, and we must be doing the barrier "
-         "in older generations.");
-  T heap_oop = RawAccess<>::oop_load(p);
-  if (!CompressedOops::is_null(heap_oop)) {
-    oop obj = CompressedOops::decode_not_null(heap_oop);
-    if ((HeapWord*)obj < _boundary) {
-#ifndef PRODUCT
-      if (_g->to()->is_in_reserved(obj)) {
-        Log(gc) log;
-        log.error("Scanning field (" PTR_FORMAT ") twice?", p2i(p));
-        CMSHeap* heap = CMSHeap::heap();
-        Space* sp = heap->space_containing(p);
-        oop obj = oop(sp->block_start(p));
-        assert((HeapWord*)obj < (HeapWord*)p, "Error");
-        log.error("Object: " PTR_FORMAT, p2i((void *)obj));
-        log.error("-------");
-        LogStream ls(log.error());
-        obj->print_on(&ls);
-        log.error("-----");
-        log.error("Heap:");
-        log.error("-----");
-        heap->print_on(&ls);
-        ShouldNotReachHere();
-      }
-#endif
-      // OK, we need to ensure that it is copied.
-      // We read the klass and mark in this order, so that we can reliably
-      // get the size of the object: if the mark we read is not a
-      // forwarding pointer, then the klass is valid: the klass is only
-      // overwritten with an overflow next pointer after the object is
-      // forwarded.
-      Klass* objK = obj->klass();
-      markWord m = obj->mark_raw();
-      oop new_obj;
-      if (m.is_marked()) { // Contains forwarding pointer.
-        new_obj = ParNewGeneration::real_forwardee(obj);
-        RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
-        log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
-                                        "forwarded ",
-                                        new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
-      } else {
-        size_t obj_sz = obj->size_given_klass(objK);
-        new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
-        RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
-        if (root_scan) {
-          // This may have pushed an object.  If we have a root
-          // category with a lot of roots, can't let the queue get too
-          // full:
-          (void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
-        }
-      }
-      if (is_scanning_a_cld()) {
-        do_cld_barrier();
-      } else if (gc_barrier) {
-        // Now call parent closure
-        par_do_barrier(p);
-      }
-    }
-  }
-}
-
-inline void ParScanWithBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, true, false); }
-inline void ParScanWithBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, true, false); }
-
-inline void ParScanWithoutBarrierClosure::do_oop(oop* p)       { ParScanClosure::do_oop_work(p, false, false); }
-inline void ParScanWithoutBarrierClosure::do_oop(narrowOop* p) { ParScanClosure::do_oop_work(p, false, false); }
-
-#endif // SHARE_GC_CMS_PAROOPCLOSURES_INLINE_HPP
--- a/src/hotspot/share/gc/cms/promotionInfo.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,308 +0,0 @@
-/*
- * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/promotionInfo.hpp"
-#include "gc/shared/genOopClosures.hpp"
-#include "oops/compressedOops.inline.hpp"
-#include "oops/markWord.inline.hpp"
-#include "oops/oop.inline.hpp"
-
-/////////////////////////////////////////////////////////////////////////
-//// PromotionInfo
-/////////////////////////////////////////////////////////////////////////
-
-
-PromotedObject* PromotedObject::next() const {
-  assert(!((FreeChunk*)this)->is_free(), "Error");
-  PromotedObject* res;
-  if (UseCompressedOops) {
-    // The next pointer is a compressed oop stored in the top 32 bits
-    res = (PromotedObject*)CompressedOops::decode(_data._narrow_next);
-  } else {
-    res = (PromotedObject*)(_next & next_mask);
-  }
-  assert(oopDesc::is_oop_or_null(oop(res), true /* ignore mark word */), "Expected an oop or NULL at " PTR_FORMAT, p2i(oop(res)));
-  return res;
-}
-
-inline void PromotedObject::setNext(PromotedObject* x) {
-  assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
-         "or insufficient alignment of objects");
-  if (UseCompressedOops) {
-    assert(_data._narrow_next == 0, "Overwrite?");
-    _data._narrow_next = CompressedOops::encode(oop(x));
-  } else {
-    _next |= (intptr_t)x;
-  }
-  assert(!((FreeChunk*)this)->is_free(), "Error");
-}
-
-// Return the next displaced header, incrementing the pointer and
-// recycling spool area as necessary.
-markWord PromotionInfo::nextDisplacedHeader() {
-  assert(_spoolHead != NULL, "promotionInfo inconsistency");
-  assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
-         "Empty spool space: no displaced header can be fetched");
-  assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
-  markWord hdr = _spoolHead->displacedHdr[_firstIndex];
-  // Spool forward
-  if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
-    // forward to next block, recycling this block into spare spool buffer
-    SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
-    assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
-    _spoolHead->nextSpoolBlock = _spareSpool;
-    _spareSpool = _spoolHead;
-    _spoolHead = tmp;
-    _firstIndex = 1;
-    NOT_PRODUCT(
-      if (_spoolHead == NULL) {  // all buffers fully consumed
-        assert(_spoolTail == NULL && _nextIndex == 1,
-               "spool buffers processing inconsistency");
-      }
-    )
-  }
-  return hdr;
-}
-
-void PromotionInfo::track(PromotedObject* trackOop) {
-  track(trackOop, oop(trackOop)->klass());
-}
-
-void PromotionInfo::track(PromotedObject* trackOop, Klass* klassOfOop) {
-  // make a copy of header as it may need to be spooled
-  markWord mark = oop(trackOop)->mark_raw();
-  trackOop->clear_next();
-  if (mark.must_be_preserved_for_cms_scavenge(klassOfOop)) {
-    // save non-prototypical header, and mark oop
-    saveDisplacedHeader(mark);
-    trackOop->setDisplacedMark();
-  } else {
-    // we'd like to assert something like the following:
-    // assert(mark == markWord::prototype(), "consistency check");
-    // ... but the above won't work because the age bits have not (yet) been
-    // cleared. The remainder of the check would be identical to the
-    // condition checked in must_be_preserved() above, so we don't really
-    // have anything useful to check here!
-  }
-  if (_promoTail != NULL) {
-    assert(_promoHead != NULL, "List consistency");
-    _promoTail->setNext(trackOop);
-    _promoTail = trackOop;
-  } else {
-    assert(_promoHead == NULL, "List consistency");
-    _promoHead = _promoTail = trackOop;
-  }
-  // Mark as newly promoted, so we can skip over such objects
-  // when scanning dirty cards
-  assert(!trackOop->hasPromotedMark(), "Should not have been marked");
-  trackOop->setPromotedMark();
-}
-
-// Save the given displaced header, incrementing the pointer and
-// obtaining more spool area as necessary.
-void PromotionInfo::saveDisplacedHeader(markWord hdr) {
-  assert(_spoolHead != NULL && _spoolTail != NULL,
-         "promotionInfo inconsistency");
-  assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
-  _spoolTail->displacedHdr[_nextIndex] = hdr;
-  // Spool forward
-  if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
-    // get a new spooling block
-    assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
-    _splice_point = _spoolTail;                   // save for splicing
-    _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
-    _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
-    // ... but will attempt filling before next promotion attempt
-    _nextIndex = 1;
-  }
-}
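
The spooling structure used by saveDisplacedHeader() and nextDisplacedHeader() is a FIFO over a singly linked list of fixed-size blocks, filled at <tail, nextIndex> and drained at <head, firstIndex>. A minimal sketch, not HotSpot code: indices here are 0-based and drained blocks are freed, whereas the real code reserves slot 0 as a self-pointer and recycles blocks onto a spare list.

```cpp
#include <cassert>
#include <cstddef>

struct SpoolBlock {
  static const size_t kSize = 4;          // tiny buffer for illustration
  unsigned long hdr[kSize];               // stand-in for displacedHdr[]
  SpoolBlock* next = nullptr;
};

struct SpoolList {
  SpoolBlock* head = nullptr;             // drained at <head, first>
  SpoolBlock* tail = nullptr;             // filled at <tail, last>
  size_t first = 0;
  size_t last = 0;

  // saveDisplacedHeader() analogue: append, growing by one block as needed.
  void save(unsigned long h) {
    if (tail == nullptr || last == SpoolBlock::kSize) {
      SpoolBlock* b = new SpoolBlock();
      if (tail != nullptr) { tail->next = b; } else { head = b; }
      tail = b;
      last = 0;
    }
    tail->hdr[last++] = h;
  }

  // nextDisplacedHeader() analogue: consume in FIFO order, freeing
  // fully drained blocks (the real code recycles them instead).
  unsigned long next_header() {
    assert(head != nullptr && (head != tail || first < last));
    unsigned long h = head->hdr[first++];
    if (first == SpoolBlock::kSize) {     // last slot in this block
      SpoolBlock* done = head;
      head = head->next;
      first = 0;
      delete done;
      if (head == nullptr) { tail = nullptr; last = 0; }
    }
    return h;
  }
};
```
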
-
-// Ensure that spooling space exists. Return false if spooling space
-// could not be obtained.
-bool PromotionInfo::ensure_spooling_space_work() {
-  assert(!has_spooling_space(), "Only call when there is no spooling space");
-  // Try and obtain more spooling space
-  SpoolBlock* newSpool = getSpoolBlock();
-  assert(newSpool == NULL ||
-         (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
-        "getSpoolBlock() sanity check");
-  if (newSpool == NULL) {
-    return false;
-  }
-  _nextIndex = 1;
-  if (_spoolTail == NULL) {
-    _spoolTail = newSpool;
-    if (_spoolHead == NULL) {
-      _spoolHead = newSpool;
-      _firstIndex = 1;
-    } else {
-      assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
-             "Splice point invariant");
-      // Extra check that _splice_point is connected to list
-      #ifdef ASSERT
-      {
-        SpoolBlock* blk = _spoolHead;
-        for (; blk->nextSpoolBlock != NULL;
-             blk = blk->nextSpoolBlock);
-        assert(blk != NULL && blk == _splice_point,
-               "Splice point incorrect");
-      }
-      #endif // ASSERT
-      _splice_point->nextSpoolBlock = newSpool;
-    }
-  } else {
-    assert(_spoolHead != NULL, "spool list consistency");
-    _spoolTail->nextSpoolBlock = newSpool;
-    _spoolTail = newSpool;
-  }
-  return true;
-}
-
-// Get a free spool buffer from the free pool, getting a new block
-// from the heap if necessary.
-SpoolBlock* PromotionInfo::getSpoolBlock() {
-  SpoolBlock* res;
-  if ((res = _spareSpool) != NULL) {
-    _spareSpool = _spareSpool->nextSpoolBlock;
-    res->nextSpoolBlock = NULL;
-  } else {  // spare spool exhausted, get some from heap
-    res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
-    if (res != NULL) {
-      res->init();
-    }
-  }
-  assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
-  return res;
-}
-
-void PromotionInfo::startTrackingPromotions() {
-  assert(noPromotions(), "sanity");
-  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
-         "spooling inconsistency?");
-  _firstIndex = _nextIndex = 1;
-  _tracking = true;
-}
-
-void PromotionInfo::stopTrackingPromotions() {
-  assert(noPromotions(), "we should have torn down the lists by now");
-  assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
-         "spooling inconsistency?");
-  _firstIndex = _nextIndex = 1;
-  _tracking = false;
-}
-
-// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
-// points to the next slot available for filling.
-// The set of slots holding displaced headers are then all those in the
-// right-open interval denoted by:
-//
-//    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
-//
-// When _spoolTail is NULL, then the set of slots with displaced headers
-// is all those starting at the slot <_spoolHead, _firstIndex> and
-// going up to the last slot of last block in the linked list.
-// In this latter case, _splice_point points to the tail block of
-// this linked list of blocks holding displaced headers.
-void PromotionInfo::verify() const {
-  // Verify the following:
-  // 1. the number of displaced headers matches the number of promoted
-  //    objects that have displaced headers
-  // 2. each promoted object lies in this space
-  debug_only(
-    PromotedObject* junk = NULL;
-    assert(junk->next_addr() == (void*)(oop(junk)->mark_addr_raw()),
-           "Offset of PromotedObject::_next is expected to align with "
-           "  the OopDesc::_mark within OopDesc");
-  )
-  // FIXME: guarantee????
-  guarantee(_spoolHead == NULL || _spoolTail != NULL ||
-            _splice_point != NULL, "list consistency");
-  guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
-  // count the number of objects with displaced headers
-  size_t numObjsWithDisplacedHdrs = 0;
-  for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
-    guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
-    // the last promoted object may fail the mark() != NULL test of is_oop().
-    guarantee(curObj->next() == NULL || oopDesc::is_oop(oop(curObj)), "must be an oop");
-    if (curObj->hasDisplacedMark()) {
-      numObjsWithDisplacedHdrs++;
-    }
-  }
-  // Count the number of displaced headers
-  size_t numDisplacedHdrs = 0;
-  for (SpoolBlock* curSpool = _spoolHead;
-       curSpool != _spoolTail && curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    // the first entry is just a self-pointer; indices 1 through
-    // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
-    guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
-              "first entry of displacedHdr should be self-referential");
-    numDisplacedHdrs += curSpool->bufferSize - 1;
-  }
-  guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
-            "internal consistency");
-  guarantee(_spoolTail != NULL || _nextIndex == 1,
-            "Inconsistency between _spoolTail and _nextIndex");
-  // We overcounted (_firstIndex-1) worth of slots in block
-  // _spoolHead and we undercounted (_nextIndex-1) worth of
-  // slots in block _spoolTail. We make an appropriate
-  // adjustment by subtracting the first and adding the
-  // second:  - (_firstIndex - 1) + (_nextIndex - 1)
-  numDisplacedHdrs += (_nextIndex - _firstIndex);
-  guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
-}
-
-void PromotionInfo::print_on(outputStream* st) const {
-  SpoolBlock* curSpool = NULL;
-  size_t i = 0;
-  st->print_cr(" start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
-               _firstIndex, _nextIndex);
-  for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    curSpool->print_on(st);
-    st->print_cr(" active ");
-    i++;
-  }
-  for (curSpool = _spoolTail; curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    curSpool->print_on(st);
-    st->print_cr(" inactive ");
-    i++;
-  }
-  for (curSpool = _spareSpool; curSpool != NULL;
-       curSpool = curSpool->nextSpoolBlock) {
-    curSpool->print_on(st);
-    st->print_cr(" free ");
-    i++;
-  }
-  st->print_cr("  " SIZE_FORMAT " header spooling blocks", i);
-}
-
-void SpoolBlock::print_on(outputStream* st) const {
-  st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
-            p2i(this), p2i((HeapWord*)displacedHdr + bufferSize),
-            bufferSize, p2i(nextSpoolBlock));
-}
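
The interval bookkeeping used by saveDisplacedHeader() and verify() above is easiest to check with the arithmetic written out. Below is a minimal standalone sketch, not HotSpot code: Block and count_spooled are hypothetical, and the simplified Block stands in for SpoolBlock (slot 0 of each block is the self-referential first entry, so a block contributes bufferSize - 1 headers).

#include <cstddef>

struct Block {
  Block*      next;
  std::size_t bufferSize;  // slots 1 .. bufferSize-1 can hold displaced headers
};

// Count the headers in the right-open interval
// [ <head, firstIndex>, <tail, nextIndex> ), as PromotionInfo::verify() does.
std::size_t count_spooled(Block* head, Block* tail,
                          std::size_t firstIndex, std::size_t nextIndex) {
  std::size_t n = 0;
  for (Block* b = head; b != tail && b != NULL; b = b->next) {
    n += b->bufferSize - 1;  // blocks before the tail, counted as full
  }
  // The head block was overcounted by its (firstIndex - 1) consumed slots and
  // the tail block undercounted by its (nextIndex - 1) filled slots, so the
  // net adjustment is -(firstIndex - 1) + (nextIndex - 1):
  return n + (nextIndex - firstIndex);
}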
--- a/src/hotspot/share/gc/cms/promotionInfo.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PROMOTIONINFO_HPP
-#define SHARE_GC_CMS_PROMOTIONINFO_HPP
-
-#include "gc/cms/freeChunk.hpp"
-
-// Forward declarations
-class CompactibleFreeListSpace;
-
-class PromotedObject {
- private:
-  enum {
-    promoted_mask  = right_n_bits(2),   // i.e. 0x3
-    displaced_mark = nth_bit(2),        // i.e. 0x4
-    next_mask      = ~(right_n_bits(3)) // i.e. ~(0x7)
-  };
-
-  // Below, we want _narrow_next in the "higher" 32 bit slot,
-  // whose position will depend on endian-ness of the platform.
-  // This is so that there is no interference with the
-  // cms_free_bit occupying bit position 7 (lsb == 0)
-  // when we are using compressed oops; see FreeChunk::is_free().
-  // We cannot move the cms_free_bit down because currently
-  // biased locking code assumes that age bits are contiguous
-  // with the lock bits. Even if that assumption were relaxed,
-  // the least position we could move this bit to would be
-  // to bit position 3, which would require 16 byte alignment.
-  typedef struct {
-#ifdef VM_LITTLE_ENDIAN
-    LP64_ONLY(narrowOop _pad;)
-              narrowOop _narrow_next;
-#else
-              narrowOop _narrow_next;
-    LP64_ONLY(narrowOop _pad;)
-#endif
-  } Data;
-
-  union {
-    intptr_t _next;
-    Data     _data;
-  };
- public:
-  PromotedObject* next() const;
-  void setNext(PromotedObject* x);
-  inline void setPromotedMark() {
-    _next |= promoted_mask;
-    assert(!((FreeChunk*)this)->is_free(), "Error");
-  }
-  inline bool hasPromotedMark() const {
-    assert(!((FreeChunk*)this)->is_free(), "Error");
-    return (_next & promoted_mask) == promoted_mask;
-  }
-  inline void setDisplacedMark() {
-    _next |= displaced_mark;
-    assert(!((FreeChunk*)this)->is_free(), "Error");
-  }
-  inline bool hasDisplacedMark() const {
-    assert(!((FreeChunk*)this)->is_free(), "Error");
-    return (_next & displaced_mark) != 0;
-  }
-  inline void clear_next()        {
-    _next = 0;
-    assert(!((FreeChunk*)this)->is_free(), "Error");
-  }
-  debug_only(void *next_addr() { return (void *) &_next; })
-};
-
-class SpoolBlock: public FreeChunk {
-  friend class PromotionInfo;
- protected:
-  SpoolBlock*  nextSpoolBlock;
-  size_t       bufferSize;        // number of usable words in this block
-  markWord*    displacedHdr;      // the displaced headers start here
-
-  // Note about bufferSize: it denotes the number of entries available plus 1;
-  // legal indices range from 1 through bufferSize - 1.  See the verification
-  // code verify() that counts the number of displaced headers spooled.
-  size_t computeBufferSize() {
-    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markWord);
-  }
-
- public:
-  void init() {
-    bufferSize = computeBufferSize();
-    displacedHdr = (markWord*)&displacedHdr;
-    nextSpoolBlock = NULL;
-  }
-
-  void print_on(outputStream* st) const;
-  void print() const { print_on(tty); }
-};
-
-class PromotionInfo {
-  bool            _tracking;      // set if tracking
-  CompactibleFreeListSpace* _space; // the space to which this belongs
-  PromotedObject* _promoHead;     // head of list of promoted objects
-  PromotedObject* _promoTail;     // tail of list of promoted objects
-  SpoolBlock*     _spoolHead;     // first spooling block
-  SpoolBlock*     _spoolTail;     // last  non-full spooling block or null
-  SpoolBlock*     _splice_point;  // when _spoolTail is null, holds list tail
-  SpoolBlock*     _spareSpool;    // free spool buffer
-  size_t          _firstIndex;    // first active index in
-                                  // first spooling block (_spoolHead)
-  size_t          _nextIndex;     // last active index + 1 in last
-                                  // spooling block (_spoolTail)
- private:
-  // ensure that spooling space exists; return true if there is spooling space
-  bool ensure_spooling_space_work();
-
- public:
-  PromotionInfo() :
-    _tracking(false), _space(NULL),
-    _promoHead(NULL), _promoTail(NULL),
-    _spoolHead(NULL), _spoolTail(NULL),
-    _splice_point(NULL), _spareSpool(NULL),
-    _firstIndex(1), _nextIndex(1) {}
-
-  bool noPromotions() const {
-    assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
-    return _promoHead == NULL;
-  }
-  void startTrackingPromotions();
-  void stopTrackingPromotions();
-  bool tracking() const          { return _tracking;  }
-  void track(PromotedObject* trackOop);      // keep track of a promoted oop
-  // The following variant must be used when trackOop is not fully
-  // initialized and has a NULL klass:
-  void track(PromotedObject* trackOop, Klass* klassOfOop); // keep track of a promoted oop
-  void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
-  CompactibleFreeListSpace* space() const     { return _space; }
-  markWord nextDisplacedHeader(); // get next header & forward spool pointer
-  void    saveDisplacedHeader(markWord hdr);
-                                 // save header and forward spool
-
-  inline size_t refillSize() const;
-
-  SpoolBlock* getSpoolBlock();   // return a free spooling block
-  inline bool has_spooling_space() {
-    return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
-  }
-  // ensure that spooling space exists
-  bool ensure_spooling_space() {
-    return has_spooling_space() || ensure_spooling_space_work();
-  }
-
-  template <typename OopClosureType>
-  void promoted_oops_iterate(OopClosureType* cl);
-
-  void verify()  const;
-  void reset() {
-    _promoHead = NULL;
-    _promoTail = NULL;
-    _spoolHead = NULL;
-    _spoolTail = NULL;
-    _spareSpool = NULL;
-    _firstIndex = 0;
-    _nextIndex = 0;
-  }
-
-  void print_on(outputStream* st) const;
-};
-
-
-#endif // SHARE_GC_CMS_PROMOTIONINFO_HPP
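
The flag encoding above packs the list linkage and two mark bits into a single word. Here is a standalone sketch of the same low-bit tagging, with hypothetical helper names (the real accessors are the PromotedObject methods above); like the enum, it assumes the next pointer is at least 8-byte aligned so the low three bits are free:

#include <cstdint>

const intptr_t promoted_mask  = 0x3;             // right_n_bits(2)
const intptr_t displaced_mark = 0x4;             // nth_bit(2)
const intptr_t next_mask      = ~intptr_t(0x7);  // clears all three flag bits

intptr_t set_promoted(intptr_t next)  { return next | promoted_mask; }
bool     has_displaced(intptr_t next) { return (next & displaced_mark) != 0; }
void*    decode_next(intptr_t next)   { return (void*)(next & next_mask); }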
--- a/src/hotspot/share/gc/cms/promotionInfo.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_PROMOTIONINFO_INLINE_HPP
-#define SHARE_GC_CMS_PROMOTIONINFO_INLINE_HPP
-
-#include "gc/cms/promotionInfo.hpp"
-#include "oops/oop.inline.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/macros.hpp"
-
-//////////////////////////////////////////////////////////////////////////////
-// We go over the list of promoted objects, removing each from the list,
-// and applying the closure (this may, in turn, add more elements to
-// the tail of the promoted list, and these newly added objects will
-// also be processed) until the list is empty.
-// To aid verification and debugging, in the non-product builds
-// we actually forward _promoHead each time we process a promoted oop.
-// Note that this is not necessary in general (i.e. when we don't need to
-// call PromotionInfo::verify()) because oop_iterate can only add to the
-// end of _promoTail, and never needs to look at _promoHead.
-
-template <typename OopClosureType>
-void PromotionInfo::promoted_oops_iterate(OopClosureType* cl) {
-  NOT_PRODUCT(verify());
-  PromotedObject *curObj, *nextObj;
-  for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {
-    if ((nextObj = curObj->next()) == NULL) {
-      /* protect ourselves against additions due to closure application
-         below by resetting the list.  */
-      assert(_promoTail == curObj, "Should have been the tail");
-      _promoHead = _promoTail = NULL;
-    }
-    if (curObj->hasDisplacedMark()) {
-      /* restore displaced header */
-      oop(curObj)->set_mark_raw(nextDisplacedHeader());
-    } else {
-      /* restore prototypical header */
-      oop(curObj)->init_mark_raw();
-    }
-    /* The "promoted_mark" should now not be set */
-    assert(!curObj->hasPromotedMark(),
-           "Should have been cleared by restoring displaced mark-word");
-    NOT_PRODUCT(_promoHead = nextObj);
-    if (cl != NULL) oop(curObj)->oop_iterate(cl);
-    if (nextObj == NULL) { /* start at head of list reset above */
-      nextObj = _promoHead;
-    }
-  }
-  assert(noPromotions(), "post-condition violation");
-  assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");
-  assert(_spoolHead == _spoolTail, "emptied spooling buffers");
-  assert(_firstIndex == _nextIndex, "empty buffer");
-}
-
-#endif // SHARE_GC_CMS_PROMOTIONINFO_INLINE_HPP
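
The loop above is a drain-while-appending pattern: the visitor may grow the very list it is draining. A standalone sketch with hypothetical Node and Visitor types (the real code additionally restores mark words and keeps _promoHead in sync for verification):

#include <cstddef>

struct Node { Node* next; };

template <typename Visitor>
void drain(Node*& head, Node*& tail, Visitor visit) {
  for (Node* cur = head; cur != NULL; ) {
    Node* nxt = cur->next;
    if (nxt == NULL) {
      head = tail = NULL;  // detach the list; appends inside visit() rebuild it
    }
    visit(cur);            // may append new nodes at (head, tail)
    cur = (nxt != NULL) ? nxt : head;  // resume with anything appended meanwhile
  }
}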
--- a/src/hotspot/share/gc/cms/vmStructs_cms.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-/*
- * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_VMSTRUCTS_CMS_HPP
-#define SHARE_GC_CMS_VMSTRUCTS_CMS_HPP
-
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/compactibleFreeListSpace.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-
-#define VM_STRUCTS_CMSGC(nonstatic_field,                                                                                            \
-                         volatile_nonstatic_field,                                                                                   \
-                         static_field)                                                                                               \
-  nonstatic_field(CompactibleFreeListSpace,    _collector,                                    CMSCollector*)                         \
-  nonstatic_field(CompactibleFreeListSpace,    _bt,                                           BlockOffsetArrayNonContigSpace)        \
-     static_field(CompactibleFreeListSpace,    _min_chunk_size_in_bytes,                      size_t)                                \
-  nonstatic_field(CMSBitMap,                   _bmStartWord,                                  HeapWord*)                             \
-  nonstatic_field(CMSBitMap,                   _bmWordSize,                                   size_t)                                \
-  nonstatic_field(CMSBitMap,                   _shifter,                                      const int)                             \
-  nonstatic_field(CMSBitMap,                   _bm,                                           BitMapView)                            \
-  nonstatic_field(CMSBitMap,                   _virtual_space,                                VirtualSpace)                          \
-  nonstatic_field(CMSCollector,                _markBitMap,                                   CMSBitMap)                             \
-  nonstatic_field(ConcurrentMarkSweepGeneration, _cmsSpace,                                   CompactibleFreeListSpace*)             \
-     static_field(ConcurrentMarkSweepThread,   _collector,                                    CMSCollector*)                         \
-  nonstatic_field(LinearAllocBlock,            _word_size,                                    size_t)                                \
-  nonstatic_field(AFLBinaryTreeDictionary,     _total_size,                                   size_t)                                \
-  nonstatic_field(CompactibleFreeListSpace,    _dictionary,                                   AFLBinaryTreeDictionary*)              \
-  nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           AdaptiveFreeList<FreeChunk>)           \
-  nonstatic_field(CompactibleFreeListSpace,    _smallLinearAllocBlock,                        LinearAllocBlock)                      \
-  volatile_nonstatic_field(FreeChunk,          _size,                                         size_t)                                \
-  nonstatic_field(FreeChunk,                   _next,                                         FreeChunk*)                            \
-  nonstatic_field(FreeChunk,                   _prev,                                         FreeChunk*)                            \
-  nonstatic_field(AdaptiveFreeList<FreeChunk>, _size,                                         size_t)                                \
-  nonstatic_field(AdaptiveFreeList<FreeChunk>, _count,                                        ssize_t)
-
-
-
-#define VM_TYPES_CMSGC(declare_type,                                      \
-                       declare_toplevel_type,                             \
-                       declare_integer_type)                              \
-                                                                          \
-           declare_type(CMSHeap,                      GenCollectedHeap)   \
-           declare_type(ConcurrentMarkSweepGeneration,CardGeneration)     \
-           declare_type(ParNewGeneration,             DefNewGeneration)   \
-           declare_type(CompactibleFreeListSpace,     CompactibleSpace)   \
-           declare_type(ConcurrentMarkSweepThread,    NamedThread)        \
-  declare_toplevel_type(CMSCollector)                                     \
-  declare_toplevel_type(CMSBitMap)                                        \
-  declare_toplevel_type(FreeChunk)                                        \
-  declare_toplevel_type(metaspace::Metablock)                             \
-  declare_toplevel_type(ConcurrentMarkSweepThread*)                       \
-  declare_toplevel_type(ConcurrentMarkSweepGeneration*)                   \
-  declare_toplevel_type(CompactibleFreeListSpace*)                        \
-  declare_toplevel_type(CMSCollector*)                                    \
-  declare_toplevel_type(AFLBinaryTreeDictionary)                          \
-  declare_toplevel_type(LinearAllocBlock)                                 \
-  declare_toplevel_type(FreeChunk*)                                       \
-  declare_toplevel_type(AdaptiveFreeList<FreeChunk>*)                     \
-  declare_toplevel_type(AdaptiveFreeList<FreeChunk>)
-
-
-#define VM_INT_CONSTANTS_CMSGC(declare_constant,                          \
-                               declare_constant_with_value)               \
-  declare_constant(CompactibleFreeListSpace::IndexSetSize)                \
-  declare_constant(Generation::ConcurrentMarkSweep)                       \
-  declare_constant(Generation::ParNew)
-
-#endif // SHARE_GC_CMS_VMSTRUCTS_CMS_HPP
--- a/src/hotspot/share/gc/cms/yieldingWorkgroup.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,399 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/yieldingWorkgroup.hpp"
-#include "gc/shared/gcId.hpp"
-#include "utilities/macros.hpp"
-
-YieldingFlexibleGangWorker::YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id)
-    : AbstractGangWorker(gang, id) {}
-
-YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
-    const char* name, uint workers, bool are_GC_task_threads) :
-         AbstractWorkGang(name, workers, are_GC_task_threads, false),
-         _yielded_workers(0),
-         _started_workers(0),
-         _finished_workers(0),
-         _sequence_number(0),
-         _task(NULL) {
-
-  // Other initialization.
-  _monitor = new Monitor(/* priority */       Mutex::leaf,
-                         /* name */           "WorkGroup monitor",
-                         /* allow_vm_block */ are_GC_task_threads,
-                                              Monitor::_safepoint_check_never);
-
-  assert(monitor() != NULL, "Failed to allocate monitor");
-}
-
-AbstractGangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
-  return new YieldingFlexibleGangWorker(this, which);
-}
-
-void YieldingFlexibleWorkGang::internal_worker_poll(YieldingWorkData* data) const {
-  assert(data != NULL, "worker data is null");
-  data->set_task(task());
-  data->set_sequence_number(sequence_number());
-}
-
-void YieldingFlexibleWorkGang::internal_note_start() {
-  assert(monitor()->owned_by_self(), "note_finish is an internal method");
-  _started_workers += 1;
-}
-
-void YieldingFlexibleWorkGang::internal_note_finish() {
-  assert(monitor()->owned_by_self(), "note_finish is an internal method");
-  _finished_workers += 1;
-}
-
-// Run a task; returns when the task is done, or the workers yield,
-// or the task is aborted.
-// A task that has been yielded can be continued via this interface
-// by using the same task repeatedly as the argument to the call.
-// It is expected that the YieldingFlexibleGangTask carries the appropriate
-// continuation information used by workers to continue the task
-// from its last yield point. Thus, a completed task will return
-// immediately with no actual work having been done by the workers.
-/////////////////////
-// Implementation notes: remove before check-in XXX
-/*
-Each gang is working on a task at a certain time.
-Some subset of workers may have yielded and some may
-have finished their quota of work. Until this task has
-been completed, the workers are bound to that task.
-Once the task has been completed, the gang unbinds
-itself from the task.
-
-The yielding work gang thus exports two invocation
-interfaces: start_task() and continue_task(). The
-first is used to initiate a new task and bind it
-to the workers; the second is used to continue an
-already bound task that has yielded. Upon completion
-the binding is released and a new binding may be
-created.
-
-The shape of a yielding work gang is as follows:
-
-Overseer invokes start_task(*task).
-   Lock gang monitor
-   Check that there is no existing binding for the gang
-   If so, abort with an error
-   Else, create a new binding of this gang to the given task
-   Set number of active workers (as asked)
-   Notify workers that work is ready to be done
-     [the requisite # workers would then start up
-      and do the task]
-   Wait on the monitor until either
-     all work is completed or the task has yielded
-     -- this is normally done through
-        yielded + completed == active
-        [completed workers are reset to idle state by overseer?]
-   return appropriate status to caller
-
-Overseer invokes continue_task(*task),
-   Lock gang monitor
-   Check that task is the same as current binding
-   If not, abort with an error
-   Else, set the number of active workers as requested?
-   Notify workers that they can continue from yield points
-    New workers can also start up as required
-      while satisfying the constraint that
-         active + yielded does not exceed the required number
-   Wait (as above).
-
-NOTE: In the above, for simplicity in a first iteration
-  our gangs will be of fixed population and will not
-  therefore be flexible work gangs, just yielding work
-  gangs. Once this works well, we will, in a second
-  iteration/refinement, introduce flexibility into
-  the work gang.
-
-NOTE: we can always create a new gang for each iteration
-  in order to get the flexibility, but for now we
-  desist from that simplified route.
-
- */
-/////////////////////
-void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
-  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
-  assert(task() == NULL, "Gang currently tied to a task");
-  assert(new_task != NULL, "Null task");
-  // Bind task to gang
-  _task = new_task;
-  new_task->set_gang(this);  // Establish 2-way binding to support yielding
-  _sequence_number++;
-
-  uint requested_size = new_task->requested_size();
-  if (requested_size != 0) {
-    _active_workers = MIN2(requested_size, total_workers());
-  } else {
-    _active_workers = active_workers();
-  }
-  new_task->set_actual_size(_active_workers);
-  new_task->set_for_termination(_active_workers);
-
-  assert(_started_workers == 0, "Tabula rasa non");
-  assert(_finished_workers == 0, "Tabula rasa non");
-  assert(_yielded_workers == 0, "Tabula rasa non");
-  yielding_task()->set_status(ACTIVE);
-
-  // Wake up all the workers, the first few will get to work,
-  // and the rest will go back to sleep
-  monitor()->notify_all();
-  wait_for_gang();
-}
-
-void YieldingFlexibleWorkGang::wait_for_gang() {
-
-  assert(monitor()->owned_by_self(), "Data race");
-  // Wait for task to complete or yield
-  for (Status status = yielding_task()->status();
-       status != COMPLETED && status != YIELDED && status != ABORTED;
-       status = yielding_task()->status()) {
-    assert(started_workers() <= active_workers(), "invariant");
-    assert(finished_workers() <= active_workers(), "invariant");
-    assert(yielded_workers() <= active_workers(), "invariant");
-    monitor()->wait_without_safepoint_check();
-  }
-  switch (yielding_task()->status()) {
-    case COMPLETED:
-    case ABORTED: {
-      assert(finished_workers() == active_workers(), "Inconsistent status");
-      assert(yielded_workers() == 0, "Invariant");
-      reset();   // for next task; gang<->task binding released
-      break;
-    }
-    case YIELDED: {
-      assert(yielded_workers() > 0, "Invariant");
-      assert(yielded_workers() + finished_workers() == active_workers(),
-             "Inconsistent counts");
-      break;
-    }
-    case ACTIVE:
-    case INACTIVE:
-    case COMPLETING:
-    case YIELDING:
-    case ABORTING:
-    default:
-      ShouldNotReachHere();
-  }
-}
-
-void YieldingFlexibleWorkGang::continue_task(
-  YieldingFlexibleGangTask* gang_task) {
-
-  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
-  assert(task() != NULL && task() == gang_task, "Incorrect usage");
-  assert(_started_workers == _active_workers, "Precondition");
-  assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
-         "Else why are we calling continue_task()");
-  // Restart the yielded gang workers
-  yielding_task()->set_status(ACTIVE);
-  monitor()->notify_all();
-  wait_for_gang();
-}
-
-void YieldingFlexibleWorkGang::reset() {
-  _started_workers  = 0;
-  _finished_workers = 0;
-  yielding_task()->set_gang(NULL);
-  _task = NULL;    // unbind gang from task
-}
-
-void YieldingFlexibleWorkGang::yield() {
-  assert(task() != NULL, "Inconsistency; should have task binding");
-  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
-  assert(yielded_workers() < active_workers(), "Consistency check");
-  if (yielding_task()->status() == ABORTING) {
-    // Do not yield; we need to abort as soon as possible
-    // XXX NOTE: This can cause a performance pathology in the
-    // current implementation in Mustang, as of today, and
-    // pre-Mustang in that as soon as an overflow occurs,
-    // yields will not be honoured. The right way to proceed
-    // of course is to fix bug # TBF, so that abort's cause
-    // us to return at each potential yield point.
-    return;
-  }
-  if (++_yielded_workers + finished_workers() == active_workers()) {
-    yielding_task()->set_status(YIELDED);
-    monitor()->notify_all();
-  } else {
-    yielding_task()->set_status(YIELDING);
-  }
-
-  while (true) {
-    switch (yielding_task()->status()) {
-      case YIELDING:
-      case YIELDED: {
-        monitor()->wait_without_safepoint_check();
-        break;  // from switch
-      }
-      case ACTIVE:
-      case ABORTING:
-      case COMPLETING: {
-        assert(_yielded_workers > 0, "Else why am i here?");
-        _yielded_workers--;
-        return;
-      }
-      case INACTIVE:
-      case ABORTED:
-      case COMPLETED:
-      default: {
-        ShouldNotReachHere();
-      }
-    }
-  }
-  // Only return is from inside switch statement above
-  ShouldNotReachHere();
-}
-
-void YieldingFlexibleWorkGang::abort() {
-  assert(task() != NULL, "Inconsistency; should have task binding");
-  MutexLocker ml(monitor(), Mutex::_no_safepoint_check_flag);
-  assert(yielded_workers() < active_workers(), "Consistency check");
-  #ifndef PRODUCT
-    switch (yielding_task()->status()) {
-      // allowed states
-      case ACTIVE:
-      case ABORTING:
-      case COMPLETING:
-      case YIELDING:
-        break;
-      // not allowed states
-      case INACTIVE:
-      case ABORTED:
-      case COMPLETED:
-      case YIELDED:
-      default:
-        ShouldNotReachHere();
-    }
-  #endif // !PRODUCT
-  Status prev_status = yielding_task()->status();
-  yielding_task()->set_status(ABORTING);
-  if (prev_status == YIELDING) {
-    assert(yielded_workers() > 0, "Inconsistency");
-    // At least one thread has yielded, wake it up
-    // so it can go back to waiting stations ASAP.
-    monitor()->notify_all();
-  }
-}
-
-///////////////////////////////
-// YieldingFlexibleGangTask
-///////////////////////////////
-void YieldingFlexibleGangTask::yield() {
-  assert(gang() != NULL, "No gang to signal");
-  gang()->yield();
-}
-
-void YieldingFlexibleGangTask::abort() {
-  assert(gang() != NULL, "No gang to signal");
-  gang()->abort();
-}
-
-///////////////////////////////
-// YieldingFlexibleGangWorker
-///////////////////////////////
-void YieldingFlexibleGangWorker::loop() {
-  int previous_sequence_number = 0;
-  Monitor* gang_monitor = yf_gang()->monitor();
-  MutexLocker ml(gang_monitor, Mutex::_no_safepoint_check_flag);
-  YieldingWorkData data;
-  int id;
-  while (true) {
-    // Check if there is work to do.
-    yf_gang()->internal_worker_poll(&data);
-    if (data.task() != NULL && data.sequence_number() != previous_sequence_number) {
-      // There is work to be done.
-      // First check if we need to become active or if there
-      // are already the requisite number of workers
-      if (yf_gang()->started_workers() == yf_gang()->active_workers()) {
-        // There are already enough workers; we do not need
-        // to run. Fall through and wait on the monitor.
-      } else {
-        // We need to pitch in and do the work.
-        assert(yf_gang()->started_workers() < yf_gang()->active_workers(),
-               "Unexpected state");
-        id = yf_gang()->started_workers();
-        yf_gang()->internal_note_start();
-        // Now, release the gang mutex and do the work.
-        {
-          MutexUnlocker mul(gang_monitor, Mutex::_no_safepoint_check_flag);
-          GCIdMark gc_id_mark(data.task()->gc_id());
-          data.task()->work(id);   // This might include yielding
-        }
-        // Reacquire monitor and note completion of this worker
-        yf_gang()->internal_note_finish();
-        // Update status of task based on whether all workers have
-        // finished or some have yielded
-        assert(data.task() == yf_gang()->task(), "Confused task binding");
-        if (yf_gang()->finished_workers() == yf_gang()->active_workers()) {
-          switch (data.yf_task()->status()) {
-            case ABORTING: {
-              data.yf_task()->set_status(ABORTED);
-              break;
-            }
-            case ACTIVE:
-            case COMPLETING: {
-              data.yf_task()->set_status(COMPLETED);
-              break;
-            }
-            default:
-              ShouldNotReachHere();
-          }
-          gang_monitor->notify_all();  // Notify overseer
-        } else { // at least one worker is still working or yielded
-          assert(yf_gang()->finished_workers() < yf_gang()->active_workers(),
-                 "Counts inconsistent");
-          switch (data.yf_task()->status()) {
-            case ACTIVE: {
-              // first, but not only thread to complete
-              data.yf_task()->set_status(COMPLETING);
-              break;
-            }
-            case YIELDING: {
-              if (yf_gang()->finished_workers() + yf_gang()->yielded_workers()
-                  == yf_gang()->active_workers()) {
-                data.yf_task()->set_status(YIELDED);
-                gang_monitor->notify_all();  // notify overseer
-              }
-              break;
-            }
-            case ABORTING:
-            case COMPLETING: {
-              break; // nothing to do
-            }
-            default: // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
-              ShouldNotReachHere();
-          }
-        }
-      }
-    }
-    // Remember the sequence number
-    previous_sequence_number = data.sequence_number();
-    // Wait for more work
-    gang_monitor->wait_without_safepoint_check();
-  }
-}
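
The transitions above all reduce to one counting rule: the task may be reported YIELDED only when at least one worker yielded and no active worker is still running. A one-function sketch of that predicate (hypothetical helper; the counters correspond to yielded_workers(), finished_workers() and active_workers()):

bool gang_fully_yielded(unsigned yielded, unsigned finished, unsigned active) {
  // The last worker to park, whether it yielded or finished, makes this true
  // and is the one that notifies the waiting overseer.
  return yielded > 0 && (yielded + finished == active);
}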
--- a/src/hotspot/share/gc/cms/yieldingWorkgroup.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,272 +0,0 @@
-/*
- * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_CMS_YIELDINGWORKGROUP_HPP
-#define SHARE_GC_CMS_YIELDINGWORKGROUP_HPP
-
-#include "gc/shared/workgroup.hpp"
-#include "utilities/macros.hpp"
-
-// Forward declarations
-class YieldingFlexibleGangTask;
-class YieldingFlexibleWorkGang;
-
-// Status of tasks
-enum Status {
-    INACTIVE,
-    ACTIVE,
-    YIELDING,
-    YIELDED,
-    ABORTING,
-    ABORTED,
-    COMPLETING,
-    COMPLETED
-};
-
-class YieldingWorkData: public StackObj {
-  // This would be a struct, but I want accessor methods.
-private:
-  AbstractGangTask* _task;
-  int               _sequence_number;
-public:
-  // Constructor and destructor
-  YieldingWorkData() : _task(NULL), _sequence_number(0) {}
-  ~YieldingWorkData() {}
-
-  // Accessors and modifiers
-  AbstractGangTask* task()               const { return _task; }
-  void set_task(AbstractGangTask* value)       { _task = value; }
-  int sequence_number()                  const { return _sequence_number; }
-  void set_sequence_number(int value)          { _sequence_number = value; }
-
-  YieldingFlexibleGangTask* yf_task()    const {
-    return (YieldingFlexibleGangTask*)_task;
-  }
-};
-
-// Class YieldingFlexibleGangWorker:
-//   Several instances of this class run in parallel as workers for a gang.
-class YieldingFlexibleGangWorker: public AbstractGangWorker {
-public:
-  YieldingFlexibleGangWorker(YieldingFlexibleWorkGang* gang, int id);
-
-public:
-  YieldingFlexibleWorkGang* yf_gang() const
-    { return (YieldingFlexibleWorkGang*)gang(); }
-
-protected: // Override from parent class
-  virtual void loop();
-};
-
-class FlexibleGangTask: public AbstractGangTask {
-  int _actual_size;                      // size of gang obtained
-protected:
-  int _requested_size;                   // size of gang requested
-public:
-  FlexibleGangTask(const char* name): AbstractGangTask(name),
-    _actual_size(0), _requested_size(0) {}
-
-  // The abstract work method.
-  // The argument tells you which member of the gang you are.
-  virtual void work(uint worker_id) = 0;
-
-  int requested_size() const { return _requested_size; }
-  int actual_size()    const { return _actual_size; }
-
-  void set_requested_size(int sz) { _requested_size = sz; }
-  void set_actual_size(int sz)    { _actual_size    = sz; }
-};
-
-// An abstract task to be worked on by a flexible work gang,
-// and where the workers will periodically yield, usually
-// in response to some condition that is signalled by means
-// that are specific to the task at hand.
-// You subclass this to supply your own work() method.
-// A second feature of this kind of work gang is that
-// it allows for the signalling of certain exceptional
-// conditions that may be encountered during the performance
-// of the task and that may require the task at hand to be
-// `aborted' forthwith. Finally, these gangs are `flexible'
-// in that they can operate at partial capacity with some
-// gang workers waiting on the bench; in other words, the
-// size of the active worker pool can flex (up to an a priori
-// maximum) in response to task requests at certain points.
-// The last part (the flexible part) has not yet been fully
-// fleshed out and is a work in progress.
-class YieldingFlexibleGangTask: public FlexibleGangTask {
-  Status _status;
-  YieldingFlexibleWorkGang* _gang;
-
-protected:
-  // Constructor and destructor: only construct subclasses.
-  YieldingFlexibleGangTask(const char* name): FlexibleGangTask(name),
-    _status(INACTIVE),
-    _gang(NULL) { }
-
-  ~YieldingFlexibleGangTask() { }
-
-  friend class YieldingFlexibleWorkGang;
-  friend class YieldingFlexibleGangWorker;
-
-  void set_status(Status s) {
-    _status = s;
-  }
-  YieldingFlexibleWorkGang* gang() {
-    return _gang;
-  }
-  void set_gang(YieldingFlexibleWorkGang* gang) {
-    assert(_gang == NULL || gang == NULL, "Clobber without intermediate reset?");
-    _gang = gang;
-  }
-
-public:
-  // The abstract work method.
-  // The argument tells you which member of the gang you are.
-  virtual void work(uint worker_id) = 0;
-
-  // Subclasses should call the parent's yield() method
-  // after having done any work specific to the subclass.
-  virtual void yield();
-
-  // An abstract method supplied by
-  // a concrete sub-class which is used by the coordinator
-  // to do any "central yielding" work.
-  virtual void coordinator_yield() = 0;
-
-  // Subclasses should call the parent's abort() method
-  // after having done any work specific to the subclass.
-  virtual void abort();
-
-  Status status()  const { return _status; }
-  bool yielding()  const { return _status == YIELDING; }
-  bool yielded()   const { return _status == YIELDED; }
-  bool completed() const { return _status == COMPLETED; }
-  bool aborted()   const { return _status == ABORTED; }
-  bool active()    const { return _status == ACTIVE; }
-
-  // This method configures the task for proper termination.
-  // Some tasks do not have any requirements on termination
-  // and may inherit this method that does nothing.  Some
-  // tasks do some coordination on termination and override
-  // this method to implement that coordination.
-  virtual void set_for_termination(uint active_workers) {}
-};
-// Class YieldingFlexibleWorkGang: A subclass of AbstractWorkGang.
-// In particular, a YieldingFlexibleWorkGang is made up of
-// YieldingFlexibleGangWorkers, and provides infrastructure
-// supporting yielding to the "GangOverseer",
-// being the thread that orchestrates the work gang via start_task().
-class YieldingFlexibleWorkGang: public AbstractWorkGang {
-  // Here's the public interface to this class.
-public:
-  // Constructor and destructor.
-  YieldingFlexibleWorkGang(const char* name, uint workers,
-                           bool are_GC_task_threads);
-
-  YieldingFlexibleGangTask* yielding_task() const {
-    return task();
-  }
-  // Allocate a worker and return a pointer to it.
-  AbstractGangWorker* allocate_worker(uint which);
-
-  // Run a task; returns when the task is done, or the workers yield,
-  // or the task is aborted.
-  // A task that has been yielded can be continued via this same interface
-  // by using the same task repeatedly as the argument to the call.
-  // It is expected that the YieldingFlexibleGangTask carries the appropriate
-  // continuation information used by workers to continue the task
-  // from its last yield point. Thus, a completed task will return
-  // immediately with no actual work having been done by the workers.
-  void run_task(AbstractGangTask* task) {
-    guarantee(false, "Use start_task instead");
-  }
-  void start_task(YieldingFlexibleGangTask* new_task);
-  void continue_task(YieldingFlexibleGangTask* gang_task);
-
-  // Abort a currently running task, if any; returns when all the workers
-  // have stopped working on the current task and have returned to their
-  // waiting stations.
-  void abort_task();
-
-  // Yield: workers wait at their current working stations
-  // until signalled to proceed by the overseer.
-  void yield();
-
-  // Abort: workers are expected to return to their waiting
-  // stations, whence they are ready for the next task dispatched
-  // by the overseer.
-  void abort();
-
-private:
-  uint _yielded_workers;
-  void wait_for_gang();
-
-public:
-  // Accessors for fields
-  uint yielded_workers() const {
-    return _yielded_workers;
-  }
-
-private:
-  friend class YieldingFlexibleGangWorker;
-  void reset();
-
-
-  // The monitor which protects these data,
-  // and notifies of changes in it.
-  Monitor*   _monitor;
-  // Accessors for fields
-  Monitor* monitor() const {
-    return _monitor;
-  }
-
-  // The number of started workers.
-  uint _started_workers;
-  // The number of finished workers.
-  uint _finished_workers;
-
-  uint started_workers() const {
-    return _started_workers;
-  }
-  uint finished_workers() const {
-    return _finished_workers;
-  }
-
-  // A sequence number for the current task.
-  int _sequence_number;
-  int sequence_number() const {
-    return _sequence_number;
-  }
-
-  YieldingFlexibleGangTask* _task;
-  YieldingFlexibleGangTask* task() const {
-    return _task;
-  }
-
-  void internal_worker_poll(YieldingWorkData* data) const;
-  void internal_note_start();
-  void internal_note_finish();
-};
-
-#endif // SHARE_GC_CMS_YIELDINGWORKGROUP_HPP
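
Putting the interface together, the overseer side is roughly the following loop. This is a sketch only: drive() is a hypothetical caller, while start_task(), continue_task() and the status predicates are the members declared above:

void drive(YieldingFlexibleWorkGang* gang, YieldingFlexibleGangTask* task) {
  gang->start_task(task);       // bind gang <-> task; run until yield or end
  while (task->yielded()) {
    // ... do whatever central coordination caused the workers to yield ...
    gang->continue_task(task);  // workers resume from their last yield points
  }
  assert(task->completed() || task->aborted(), "no other terminal state");
}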
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -298,11 +298,7 @@
  *
  * In the case of slow allocation the allocation code must handle the barrier
  * as part of the allocation when the allocated object is not located
- * in the nursery, this would happen for humongous objects. This is similar to
- * how CMS is required to handle this case, see the comments for the method
- * CollectedHeap::new_deferred_store_barrier and OptoRuntime::new_deferred_store_barrier.
- * A deferred card mark is required for these objects and handled in the above
- * mentioned methods.
+ * in the nursery; this would happen for humongous objects.
  *
  * Returns true if the post barrier can be removed
  */
--- a/src/hotspot/share/gc/g1/g1AllocRegion.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -250,17 +250,19 @@
 #endif // PRODUCT
 
 G1AllocRegion::G1AllocRegion(const char* name,
-                             bool bot_updates)
+                             bool bot_updates,
+                             uint node_index)
   : _alloc_region(NULL),
     _count(0),
     _used_bytes_before(0),
     _bot_updates(bot_updates),
-    _name(name)
+    _name(name),
+    _node_index(node_index)
  { }
 
 HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                     bool force) {
-  return _g1h->new_mutator_alloc_region(word_size, force);
+  return _g1h->new_mutator_alloc_region(word_size, force, _node_index);
 }
 
 void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
@@ -347,7 +349,7 @@
 HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                  bool force) {
   assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, _purpose);
+  return _g1h->new_gc_alloc_region(word_size, _purpose, _node_index);
 }
 
 void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
--- a/src/hotspot/share/gc/g1/g1AllocRegion.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -28,6 +28,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/g1EvacStats.hpp"
 #include "gc/g1/g1HeapRegionAttr.hpp"
+#include "gc/g1/g1NUMA.hpp"
 
 class G1CollectedHeap;
 
@@ -38,7 +39,7 @@
 // and a lock will need to be taken when the active region needs to be
 // replaced.
 
-class G1AllocRegion {
+class G1AllocRegion : public CHeapObj<mtGC> {
 
 private:
   // The active allocating region we are currently allocating out
@@ -91,6 +92,9 @@
   HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force);
 
 protected:
+  // The memory node index this allocation region belongs to.
+  uint _node_index;
+
   // Reset the alloc region to point a the dummy region.
   void reset_alloc_region();
 
@@ -131,7 +135,7 @@
   virtual void retire_region(HeapRegion* alloc_region,
                              size_t allocated_bytes) = 0;
 
-  G1AllocRegion(const char* name, bool bot_updates);
+  G1AllocRegion(const char* name, bool bot_updates, uint node_index);
 
 public:
   static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region);
@@ -220,8 +224,8 @@
   virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
   virtual size_t retire(bool fill_up);
 public:
-  MutatorAllocRegion()
-    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */),
+  MutatorAllocRegion(uint node_index)
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */, node_index),
       _wasted_bytes(0),
       _retained_alloc_region(NULL) { }
 
@@ -245,6 +249,7 @@
 
   virtual void init();
 };
+
 // Common base class for allocation regions used during GC.
 class G1GCAllocRegion : public G1AllocRegion {
 protected:
@@ -256,16 +261,17 @@
 
   virtual size_t retire(bool fill_up);
 
-  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, G1HeapRegionAttr::region_type_t purpose)
-  : G1AllocRegion(name, bot_updates), _stats(stats), _purpose(purpose) {
+  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats,
+                  G1HeapRegionAttr::region_type_t purpose, uint node_index = G1NUMA::AnyNodeIndex)
+  : G1AllocRegion(name, bot_updates, node_index), _stats(stats), _purpose(purpose) {
     assert(stats != NULL, "Must pass non-NULL PLAB statistics");
   }
 };
 
 class SurvivorGCAllocRegion : public G1GCAllocRegion {
 public:
-  SurvivorGCAllocRegion(G1EvacStats* stats)
-  : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young) { }
+  SurvivorGCAllocRegion(G1EvacStats* stats, uint node_index)
+  : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young, node_index) { }
 };
 
 class OldGCAllocRegion : public G1GCAllocRegion {
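
The g1Allocator.cpp hunks below replace the single mutator and survivor alloc regions with per-NUMA-node arrays, built with NEW_C_HEAP_ARRAY plus placement new and torn down with explicit destructor calls. A minimal standalone sketch of that construction/teardown pattern, with plain malloc/free standing in for the C-heap macros and a hypothetical Region type:

#include <cstdlib>
#include <new>

struct Region {
  unsigned node_index;
  explicit Region(unsigned i) : node_index(i) {}
};

int main() {
  const unsigned num_nodes = 4;  // assumed number of active NUMA nodes
  Region* regions =
      static_cast<Region*>(std::malloc(num_nodes * sizeof(Region)));
  for (unsigned i = 0; i < num_nodes; i++) {
    ::new (regions + i) Region(i);  // placement new: one region per node
  }
  // ... allocation paths would index regions[] by the caller's node ...
  for (unsigned i = 0; i < num_nodes; i++) {
    regions[i].~Region();           // explicit destructor, matching the ctor
  }
  std::free(regions);
  return 0;
}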
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -28,6 +28,7 @@
 #include "gc/g1/g1EvacStats.inline.hpp"
 #include "gc/g1/g1EvacuationInfo.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1NUMA.hpp"
 #include "gc/g1/g1Policy.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
@@ -36,22 +37,53 @@
 
 G1Allocator::G1Allocator(G1CollectedHeap* heap) :
   _g1h(heap),
+  _numa(heap->numa()),
   _survivor_is_full(false),
   _old_is_full(false),
-  _mutator_alloc_region(),
-  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
+  _num_alloc_regions(_numa->num_active_nodes()),
+  _mutator_alloc_regions(NULL),
+  _survivor_gc_alloc_regions(NULL),
   _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
   _retained_old_gc_alloc_region(NULL) {
+
+  _mutator_alloc_regions = NEW_C_HEAP_ARRAY(MutatorAllocRegion, _num_alloc_regions, mtGC);
+  _survivor_gc_alloc_regions = NEW_C_HEAP_ARRAY(SurvivorGCAllocRegion, _num_alloc_regions, mtGC);
+  G1EvacStats* stat = heap->alloc_buffer_stats(G1HeapRegionAttr::Young);
+
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    ::new(_mutator_alloc_regions + i) MutatorAllocRegion(i);
+    ::new(_survivor_gc_alloc_regions + i) SurvivorGCAllocRegion(stat, i);
+  }
 }
 
-void G1Allocator::init_mutator_alloc_region() {
-  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
-  _mutator_alloc_region.init();
+G1Allocator::~G1Allocator() {
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    _mutator_alloc_regions[i].~MutatorAllocRegion();
+    _survivor_gc_alloc_regions[i].~SurvivorGCAllocRegion();
+  }
+  FREE_C_HEAP_ARRAY(MutatorAllocRegion, _mutator_alloc_regions);
+  FREE_C_HEAP_ARRAY(SurvivorGCAllocRegion, _survivor_gc_alloc_regions);
 }
 
-void G1Allocator::release_mutator_alloc_region() {
-  _mutator_alloc_region.release();
-  assert(_mutator_alloc_region.get() == NULL, "post-condition");
+#ifdef ASSERT
+bool G1Allocator::has_mutator_alloc_region() {
+  uint node_index = current_node_index();
+  return mutator_alloc_region(node_index)->get() != NULL;
+}
+#endif
+
+void G1Allocator::init_mutator_alloc_regions() {
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    assert(mutator_alloc_region(i)->get() == NULL, "pre-condition");
+    mutator_alloc_region(i)->init();
+  }
+}
+
+void G1Allocator::release_mutator_alloc_regions() {
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    mutator_alloc_region(i)->release();
+    assert(mutator_alloc_region(i)->get() == NULL, "post-condition");
+  }
 }
 
 bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
@@ -97,7 +129,10 @@
   _survivor_is_full = false;
   _old_is_full = false;
 
-  _survivor_gc_alloc_region.init();
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    survivor_gc_alloc_region(i)->init();
+  }
+
   _old_gc_alloc_region.init();
   reuse_retained_old_region(evacuation_info,
                             &_old_gc_alloc_region,
@@ -105,9 +140,14 @@
 }
 
 void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
-  evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
+  uint survivor_region_count = 0;
+  for (uint node_index = 0; node_index < _num_alloc_regions; node_index++) {
+    survivor_region_count += survivor_gc_alloc_region(node_index)->count();
+    survivor_gc_alloc_region(node_index)->release();
+  }
+  evacuation_info.set_allocation_regions(survivor_region_count +
                                          old_gc_alloc_region()->count());
-  survivor_gc_alloc_region()->release();
+
   // If we have an old GC alloc region to release, we'll save it in
   // _retained_old_gc_alloc_region. If we don't,
   // _retained_old_gc_alloc_region will become NULL. This is what we
@@ -117,7 +157,9 @@
 }
 
 void G1Allocator::abandon_gc_alloc_regions() {
-  assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    assert(survivor_gc_alloc_region(i)->get() == NULL, "pre-condition");
+  }
   assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
   _retained_old_gc_alloc_region = NULL;
 }
@@ -146,7 +188,8 @@
   // since we can't allow tlabs to grow big enough to accommodate
   // humongous objects.
 
-  HeapRegion* hr = mutator_alloc_region()->get();
+  uint node_index = current_node_index();
+  HeapRegion* hr = mutator_alloc_region(node_index)->get();
   size_t max_tlab = _g1h->max_tlab_size() * wordSize;
   if (hr == NULL) {
     return max_tlab;
@@ -157,14 +200,19 @@
 
 size_t G1Allocator::used_in_alloc_regions() {
   assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
-  return mutator_alloc_region()->used_in_alloc_regions();
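+  // Sum usage across the per-node mutator alloc regions.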
+  size_t used = 0;
+  for (uint i = 0; i < _num_alloc_regions; i++) {
+    used += mutator_alloc_region(i)->used_in_alloc_regions();
+  }
+  return used;
 }
 
 
 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
-                                              size_t word_size) {
+                                              size_t word_size,
+                                              uint node_index) {
   size_t temp = 0;
-  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
+  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, node_index);
   assert(result == NULL || temp == word_size,
          "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
          word_size, temp, p2i(result));
@@ -174,10 +222,11 @@
 HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                               size_t min_word_size,
                                               size_t desired_word_size,
-                                              size_t* actual_word_size) {
+                                              size_t* actual_word_size,
+                                              uint node_index) {
   switch (dest.type()) {
     case G1HeapRegionAttr::Young:
-      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
+      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, node_index);
     case G1HeapRegionAttr::Old:
       return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
     default:
@@ -188,18 +237,19 @@
 
 HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                    size_t desired_word_size,
-                                                   size_t* actual_word_size) {
+                                                   size_t* actual_word_size,
+                                                   uint node_index) {
   assert(!_g1h->is_humongous(desired_word_size),
          "we should not be seeing humongous-size allocations in this path");
 
-  HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
-                                                                    desired_word_size,
-                                                                    actual_word_size);
+  HeapWord* result = survivor_gc_alloc_region(node_index)->attempt_allocation(min_word_size,
+                                                                              desired_word_size,
+                                                                              actual_word_size);
   if (result == NULL && !survivor_is_full()) {
     MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
-    result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
-                                                                   desired_word_size,
-                                                                   actual_word_size);
+    result = survivor_gc_alloc_region(node_index)->attempt_allocation_locked(min_word_size,
+                                                                             desired_word_size,
+                                                                             actual_word_size);
     if (result == NULL) {
       set_survivor_full();
     }
@@ -246,15 +296,25 @@
 G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
   _g1h(G1CollectedHeap::heap()),
   _allocator(allocator),
-  _surviving_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Young)),
-  _tenured_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Old)),
   _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
     _direct_allocated[state] = 0;
-    _alloc_buffers[state] = NULL;
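+    // Young gets one PLAB per active NUMA node; other types get a single buffer.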
+    uint length = alloc_buffers_length(state);
+    _alloc_buffers[state] = NEW_C_HEAP_ARRAY(PLAB*, length, mtGC);
+    for (uint node_index = 0; node_index < length; node_index++) {
+      _alloc_buffers[state][node_index] = new PLAB(_g1h->desired_plab_sz(state));
+    }
   }
-  _alloc_buffers[G1HeapRegionAttr::Young] = &_surviving_alloc_buffer;
-  _alloc_buffers[G1HeapRegionAttr::Old]  = &_tenured_alloc_buffer;
+}
+
+G1PLABAllocator::~G1PLABAllocator() {
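+  // Delete each PLAB before freeing the per-node pointer arrays.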
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    uint length = alloc_buffers_length(state);
+    for (uint node_index = 0; node_index < length; node_index++) {
+      delete _alloc_buffers[state][node_index];
+    }
+    FREE_C_HEAP_ARRAY(PLAB*, _alloc_buffers[state]);
+  }
 }
 
 bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
@@ -263,7 +323,8 @@
 
 HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                        size_t word_sz,
-                                                       bool* plab_refill_failed) {
+                                                       bool* plab_refill_failed,
+                                                       uint node_index) {
   size_t plab_word_size = _g1h->desired_plab_sz(dest);
   size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);
 
@@ -272,14 +333,15 @@
   if ((required_in_plab <= plab_word_size) &&
     may_throw_away_buffer(required_in_plab, plab_word_size)) {
 
-    PLAB* alloc_buf = alloc_buffer(dest);
+    PLAB* alloc_buf = alloc_buffer(dest, node_index);
     alloc_buf->retire();
 
     size_t actual_plab_size = 0;
     HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                        required_in_plab,
                                                        plab_word_size,
-                                                       &actual_plab_size);
+                                                       &actual_plab_size,
+                                                       node_index);
 
     assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
            "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
@@ -298,35 +360,39 @@
     *plab_refill_failed = true;
   }
   // Try direct allocation.
-  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
+  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, node_index);
   if (result != NULL) {
     _direct_allocated[dest.type()] += word_sz;
   }
   return result;
 }
 
-void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz) {
-  alloc_buffer(dest)->undo_allocation(obj, word_sz);
+void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index) {
+  alloc_buffer(dest, node_index)->undo_allocation(obj, word_sz);
 }
 
 void G1PLABAllocator::flush_and_retire_stats() {
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
-    PLAB* const buf = _alloc_buffers[state];
-    if (buf != NULL) {
-      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
-      buf->flush_and_retire_stats(stats);
-      stats->add_direct_allocated(_direct_allocated[state]);
-      _direct_allocated[state] = 0;
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
+    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
+      PLAB* const buf = alloc_buffer(state, node_index);
+      if (buf != NULL) {
+        buf->flush_and_retire_stats(stats);
+      }
     }
+    stats->add_direct_allocated(_direct_allocated[state]);
+    _direct_allocated[state] = 0;
   }
 }
 
 size_t G1PLABAllocator::waste() const {
   size_t result = 0;
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
-    PLAB * const buf = _alloc_buffers[state];
-    if (buf != NULL) {
-      result += buf->waste();
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
+      PLAB* const buf = alloc_buffer(state, node_index);
+      if (buf != NULL) {
+        result += buf->waste();
+      }
     }
   }
   return result;
@@ -334,10 +400,12 @@
 
 size_t G1PLABAllocator::undo_waste() const {
   size_t result = 0;
-  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
-    PLAB * const buf = _alloc_buffers[state];
-    if (buf != NULL) {
-      result += buf->undo_waste();
+  for (region_type_t state = 0; state < G1HeapRegionAttr::Num; state++) {
+    for (uint node_index = 0; node_index < alloc_buffers_length(state); node_index++) {
+      PLAB* const buf = alloc_buffer(state, node_index);
+      if (buf != NULL) {
+        result += buf->undo_waste();
+      }
     }
   }
   return result;
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -31,6 +31,7 @@
 #include "gc/shared/plab.hpp"
 
 class G1EvacuationInfo;
+class G1NUMA;
 
 // Interface to keep track of which regions G1 is currently allocating into. Provides
 // some accessors (e.g. allocating into them, or getting their occupancy).
@@ -40,16 +41,20 @@
 
 private:
   G1CollectedHeap* _g1h;
+  G1NUMA* _numa;
 
   bool _survivor_is_full;
   bool _old_is_full;
 
+  // The number of MutatorAllocRegions and SurvivorGCAllocRegions used,
+  // one of each per active memory node.
+  size_t _num_alloc_regions;
+
   // Alloc region used to satisfy mutator allocation requests.
-  MutatorAllocRegion _mutator_alloc_region;
+  MutatorAllocRegion* _mutator_alloc_regions;
 
   // Alloc region used to satisfy allocation requests by the GC for
   // survivor objects.
-  SurvivorGCAllocRegion _survivor_gc_alloc_region;
+  SurvivorGCAllocRegion* _survivor_gc_alloc_regions;
 
   // Alloc region used to satisfy allocation requests by the GC for
   // old objects.
@@ -68,29 +73,37 @@
                                  HeapRegion** retained);
 
   // Accessors to the allocation regions.
-  inline MutatorAllocRegion* mutator_alloc_region();
-  inline SurvivorGCAllocRegion* survivor_gc_alloc_region();
+  inline MutatorAllocRegion* mutator_alloc_region(uint node_index);
+  inline SurvivorGCAllocRegion* survivor_gc_alloc_region(uint node_index);
   inline OldGCAllocRegion* old_gc_alloc_region();
 
   // Allocation attempt during GC for a survivor object / PLAB.
   HeapWord* survivor_attempt_allocation(size_t min_word_size,
-                                               size_t desired_word_size,
-                                               size_t* actual_word_size);
+                                        size_t desired_word_size,
+                                        size_t* actual_word_size,
+                                        uint node_index);
 
   // Allocation attempt during GC for an old object / PLAB.
   HeapWord* old_attempt_allocation(size_t min_word_size,
-                                          size_t desired_word_size,
-                                          size_t* actual_word_size);
+                                   size_t desired_word_size,
+                                   size_t* actual_word_size);
+
+  // Node index of current thread.
+  inline uint current_node_index() const;
+
 public:
   G1Allocator(G1CollectedHeap* heap);
+  ~G1Allocator();
+
+  uint num_nodes() { return (uint)_num_alloc_regions; }
 
 #ifdef ASSERT
   // Do we currently have an active mutator region to allocate into?
-  bool has_mutator_alloc_region() { return mutator_alloc_region()->get() != NULL; }
+  bool has_mutator_alloc_region();
 #endif
 
-  void init_mutator_alloc_region();
-  void release_mutator_alloc_region();
+  void init_mutator_alloc_regions();
+  void release_mutator_alloc_regions();
 
   void init_gc_alloc_regions(G1EvacuationInfo& evacuation_info);
   void release_gc_alloc_regions(G1EvacuationInfo& evacuation_info);
@@ -113,12 +126,14 @@
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
   HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
-                                   size_t word_size);
+                                   size_t word_size,
+                                   uint node_index);
 
   HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
                                    size_t min_word_size,
                                    size_t desired_word_size,
-                                   size_t* actual_word_size);
+                                   size_t* actual_word_size,
+                                   uint node_index);
 };
 
 // Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
@@ -127,12 +142,12 @@
 class G1PLABAllocator : public CHeapObj<mtGC> {
   friend class G1ParScanThreadState;
 private:
+  typedef G1HeapRegionAttr::region_type_t region_type_t;
+
   G1CollectedHeap* _g1h;
   G1Allocator* _allocator;
 
-  PLAB  _surviving_alloc_buffer;
-  PLAB  _tenured_alloc_buffer;
-  PLAB* _alloc_buffers[G1HeapRegionAttr::Num];
+  PLAB** _alloc_buffers[G1HeapRegionAttr::Num];
 
   // The survivor alignment in effect in bytes.
   // == 0 : don't align survivors
@@ -145,7 +160,13 @@
   size_t _direct_allocated[G1HeapRegionAttr::Num];
 
   void flush_and_retire_stats();
-  inline PLAB* alloc_buffer(G1HeapRegionAttr dest);
+  inline PLAB* alloc_buffer(G1HeapRegionAttr dest, uint node_index) const;
+  inline PLAB* alloc_buffer(region_type_t dest, uint node_index) const;
+
+  // Returns the number of allocation buffers for the given dest.
+  // There is only 1 buffer for Old while Young may have multiple buffers depending on
+  // active NUMA nodes.
+  inline uint alloc_buffers_length(region_type_t dest) const;
 
   // Calculate the survivor space object alignment in bytes. Returns that or 0 if
   // there are no restrictions on survivor alignment.
@@ -154,6 +175,7 @@
   bool may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const;
 public:
   G1PLABAllocator(G1Allocator* allocator);
+  ~G1PLABAllocator();
 
   size_t waste() const;
   size_t undo_waste() const;
@@ -164,18 +186,21 @@
   // PLAB failed or not.
   HeapWord* allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                         size_t word_sz,
-                                        bool* plab_refill_failed);
+                                        bool* plab_refill_failed,
+                                        uint node_index);
 
   // Allocate word_sz words in the PLAB of dest.  Returns the address of the
   // allocated memory, NULL if not successful.
   inline HeapWord* plab_allocate(G1HeapRegionAttr dest,
-                                 size_t word_sz);
+                                 size_t word_sz,
+                                 uint node_index);
 
   inline HeapWord* allocate(G1HeapRegionAttr dest,
                             size_t word_sz,
-                            bool* refill_failed);
+                            bool* refill_failed,
+                            uint node_index);
 
-  void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz);
+  void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index);
 };
 
 // G1ArchiveRegionMap is a boolean array used to mark G1 regions as
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -30,12 +30,18 @@
 #include "gc/shared/plab.inline.hpp"
 #include "memory/universe.hpp"
 
-inline MutatorAllocRegion* G1Allocator::mutator_alloc_region() {
-  return &_mutator_alloc_region;
+inline uint G1Allocator::current_node_index() const {
+  return _numa->index_of_current_thread();
 }
 
-inline SurvivorGCAllocRegion* G1Allocator::survivor_gc_alloc_region() {
-  return &_survivor_gc_alloc_region;
+inline MutatorAllocRegion* G1Allocator::mutator_alloc_region(uint node_index) {
+  assert(node_index < _num_alloc_regions, "Invalid index: %u", node_index);
+  return &_mutator_alloc_regions[node_index];
+}
+
+inline SurvivorGCAllocRegion* G1Allocator::survivor_gc_alloc_region(uint node_index) {
+  assert(node_index < _num_alloc_regions, "Invalid index: %u", node_index);
+  return &_survivor_gc_alloc_regions[node_index];
 }
 
 inline OldGCAllocRegion* G1Allocator::old_gc_alloc_region() {
@@ -45,35 +51,60 @@
 inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
                                                  size_t desired_word_size,
                                                  size_t* actual_word_size) {
-  HeapWord* result = mutator_alloc_region()->attempt_retained_allocation(min_word_size, desired_word_size, actual_word_size);
+  uint node_index = current_node_index();
+  HeapWord* result = mutator_alloc_region(node_index)->attempt_retained_allocation(min_word_size, desired_word_size, actual_word_size);
   if (result != NULL) {
     return result;
   }
-  return mutator_alloc_region()->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
+  return mutator_alloc_region(node_index)->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
 }
 
 inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
-  HeapWord* result = mutator_alloc_region()->attempt_allocation_locked(word_size);
-  assert(result != NULL || mutator_alloc_region()->get() == NULL,
-         "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region()->get()));
+  uint node_index = current_node_index();
+  HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_locked(word_size);
+  assert(result != NULL || mutator_alloc_region(node_index)->get() == NULL,
+         "Must not have a mutator alloc region if there is no memory, but is " PTR_FORMAT, p2i(mutator_alloc_region(node_index)->get()));
   return result;
 }
 
 inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
-  return mutator_alloc_region()->attempt_allocation_force(word_size);
+  uint node_index = current_node_index();
+  return mutator_alloc_region(node_index)->attempt_allocation_force(word_size);
 }
 
-inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest) {
+inline PLAB* G1PLABAllocator::alloc_buffer(G1HeapRegionAttr dest, uint node_index) const {
   assert(dest.is_valid(),
          "Allocation buffer index out of bounds: %s", dest.get_type_str());
   assert(_alloc_buffers[dest.type()] != NULL,
          "Allocation buffer is NULL: %s", dest.get_type_str());
-  return _alloc_buffers[dest.type()];
+  return alloc_buffer(dest.type(), node_index);
+}
+
+inline PLAB* G1PLABAllocator::alloc_buffer(region_type_t dest, uint node_index) const {
+  assert(dest < G1HeapRegionAttr::Num,
+         "Allocation buffer index out of bounds: %u", dest);
+
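+  // Only Young (survivor) buffers are kept per NUMA node; all other types
+  // share the single buffer at index 0.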
+  if (dest == G1HeapRegionAttr::Young) {
+    assert(node_index < alloc_buffers_length(dest),
+           "Allocation buffer index out of bounds: %u, %u", dest, node_index);
+    return _alloc_buffers[dest][node_index];
+  } else {
+    return _alloc_buffers[dest][0];
+  }
+}
+
+inline uint G1PLABAllocator::alloc_buffers_length(region_type_t dest) const {
+  if (dest == G1HeapRegionAttr::Young) {
+    return _allocator->num_nodes();
+  } else {
+    return 1;
+  }
 }
 
 inline HeapWord* G1PLABAllocator::plab_allocate(G1HeapRegionAttr dest,
-                                                size_t word_sz) {
-  PLAB* buffer = alloc_buffer(dest);
+                                                size_t word_sz,
+                                                uint node_index) {
+  PLAB* buffer = alloc_buffer(dest, node_index);
   if (_survivor_alignment_bytes == 0 || !dest.is_young()) {
     return buffer->allocate(word_sz);
   } else {
@@ -83,12 +114,13 @@
 
 inline HeapWord* G1PLABAllocator::allocate(G1HeapRegionAttr dest,
                                            size_t word_sz,
-                                           bool* refill_failed) {
-  HeapWord* const obj = plab_allocate(dest, word_sz);
+                                           bool* refill_failed,
+                                           uint node_index) {
+  HeapWord* const obj = plab_allocate(dest, word_sz, node_index);
   if (obj != NULL) {
     return obj;
   }
-  return allocate_direct_or_new_plab(dest, word_sz, refill_failed);
+  return allocate_direct_or_new_plab(dest, word_sz, refill_failed, node_index);
 }
 
 // Create the maps which is used to identify archive objects.
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -169,12 +169,15 @@
 
 // Private methods.
 
-HeapRegion* G1CollectedHeap::new_region(size_t word_size, HeapRegionType type, bool do_expand) {
+HeapRegion* G1CollectedHeap::new_region(size_t word_size,
+                                        HeapRegionType type,
+                                        bool do_expand,
+                                        uint node_index) {
   assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
          "the only time we use this to allocate a humongous region is "
          "when we are allocating a single humongous region");
 
-  HeapRegion* res = _hrm->allocate_free_region(type);
+  HeapRegion* res = _hrm->allocate_free_region(type, node_index);
 
   if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
     // Currently, only attempts to allocate GC alloc regions set
@@ -186,12 +189,15 @@
     log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                               word_size * HeapWordSize);
 
-    if (expand(word_size * HeapWordSize)) {
-      // Given that expand() succeeded in expanding the heap, and we
+    assert(word_size * HeapWordSize < HeapRegion::GrainBytes,
+           "This kind of expansion should never be more than one region. Size: " SIZE_FORMAT,
+           word_size * HeapWordSize);
+    if (expand_single_region(node_index)) {
+      // Given that expand_single_region() succeeded in expanding the heap, and we
       // always expand the heap by an amount aligned to the heap
       // region size, the free list should in theory not be empty.
       // In either case allocate_free_region() will check for NULL.
-      res = _hrm->allocate_free_region(type);
+      res = _hrm->allocate_free_region(type, node_index);
     } else {
       _expand_heap_after_alloc_failure = false;
     }
@@ -1020,7 +1026,7 @@
 
 void G1CollectedHeap::prepare_heap_for_full_collection() {
   // Make sure we'll choose a new allocation region afterwards.
-  _allocator->release_mutator_alloc_region();
+  _allocator->release_mutator_alloc_regions();
   _allocator->abandon_gc_alloc_regions();
 
   // We may have added regions to the current incremental collection
@@ -1064,7 +1070,7 @@
   // Start a new incremental collection set for the next pause
   start_new_collection_set();
 
-  _allocator->init_mutator_alloc_region();
+  _allocator->init_mutator_alloc_regions();
 
   // Post collection state updates.
   MetaspaceGC::compute_new_size();
@@ -1381,6 +1387,19 @@
   return regions_to_expand > 0;
 }
 
+bool G1CollectedHeap::expand_single_region(uint node_index) {
+  uint expanded_by = _hrm->expand_on_preferred_node(node_index);
+
+  if (expanded_by == 0) {
+    assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available());
+    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
+    return false;
+  }
+
+  policy()->record_new_heap_size(num_regions());
+  return true;
+}
+
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
@@ -1391,7 +1410,6 @@
   uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
   size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;
 
-
   log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                             shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
   if (num_regions_removed > 0) {
@@ -1493,6 +1511,7 @@
   _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
   _bot(NULL),
   _listener(),
+  _numa(G1NUMA::create()),
   _hrm(NULL),
   _allocator(NULL),
   _verifier(NULL),
@@ -1775,6 +1794,8 @@
   }
   _workers->initialize_workers();
 
+  _numa->set_region_info(HeapRegion::GrainBytes, page_size);
+
   // Create the G1ConcurrentMark data structure and thread.
   // (Must do this late, so that "max_regions" is defined.)
   _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
@@ -1822,7 +1843,7 @@
   dummy_region->set_top(dummy_region->end());
   G1AllocRegion::setup(this, dummy_region);
 
-  _allocator->init_mutator_alloc_region();
+  _allocator->init_mutator_alloc_regions();
 
   // Do create of the monitoring and management support so that
   // values in the heap have been properly initialized.
@@ -1988,7 +2009,7 @@
 }
 
 bool G1CollectedHeap::should_upgrade_to_full_gc(GCCause::Cause cause) {
-  if(policy()->force_upgrade_to_full()) {
+  if (policy()->force_upgrade_to_full()) {
     return true;
   } else if (should_do_concurrent_full_gc(_gc_cause)) {
     return false;
@@ -2035,7 +2056,7 @@
 }
 
 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
-  MonitorLocker x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+  MonitorLocker ml(G1OldGCCount_lock, Mutex::_no_safepoint_check_flag);
 
   // We assume that if concurrent == true, then the caller is a
   // concurrent thread that has joined the Suspendible Thread
@@ -2075,91 +2096,211 @@
     _cm_thread->set_idle();
   }
 
-  // This notify_all() will ensure that a thread that called
-  // System.gc() with (with ExplicitGCInvokesConcurrent set or not)
-  // and it's waiting for a full GC to finish will be woken up. It is
-  // waiting in VM_G1CollectForAllocation::doit_epilogue().
-  FullGCCount_lock->notify_all();
+  // Notify threads waiting in System.gc() (with ExplicitGCInvokesConcurrent)
+  // for a full GC to finish that their wait is over.
+  ml.notify_all();
 }
 
 void G1CollectedHeap::collect(GCCause::Cause cause) {
-  try_collect(cause, true);
+  try_collect(cause);
+}
+
+// Return true if (x < y) with allowance for wraparound.
+static bool gc_counter_less_than(uint x, uint y) {
+  return (x - y) > (UINT_MAX/2);
 }
 
-bool G1CollectedHeap::try_collect(GCCause::Cause cause, bool retry_on_gc_failure) {
+// LOG_COLLECT_CONCURRENTLY(cause, msg, args...)
+// Macro so msg printing is format-checked.
+#define LOG_COLLECT_CONCURRENTLY(cause, ...)                            \
+  do {                                                                  \
+    LogTarget(Trace, gc) LOG_COLLECT_CONCURRENTLY_lt;                   \
+    if (LOG_COLLECT_CONCURRENTLY_lt.is_enabled()) {                     \
+      ResourceMark rm; /* For thread name. */                           \
+      LogStream LOG_COLLECT_CONCURRENTLY_s(&LOG_COLLECT_CONCURRENTLY_lt); \
+      LOG_COLLECT_CONCURRENTLY_s.print("%s: Try Collect Concurrently (%s): ", \
+                                       Thread::current()->name(),       \
+                                       GCCause::to_string(cause));      \
+      LOG_COLLECT_CONCURRENTLY_s.print(__VA_ARGS__);                    \
+    }                                                                   \
+  } while (0)
+
+#define LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, result) \
+  LOG_COLLECT_CONCURRENTLY(cause, "complete %s", BOOL_TO_STR(result))
+
+bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
+                                               uint gc_counter,
+                                               uint old_marking_started_before) {
   assert_heap_not_locked();
-
-  bool gc_succeeded;
-  bool should_retry_gc;
-
-  do {
-    should_retry_gc = false;
-
-    uint gc_count_before;
-    uint old_marking_count_before;
-    uint full_gc_count_before;
-
+  assert(should_do_concurrent_full_gc(cause),
+         "Non-concurrent cause %s", GCCause::to_string(cause));
+
+  for (uint i = 1; true; ++i) {
+    // Try to schedule an initial-mark evacuation pause that will
+    // start a concurrent cycle.
+    LOG_COLLECT_CONCURRENTLY(cause, "attempt %u", i);
+    VM_G1TryInitiateConcMark op(gc_counter,
+                                cause,
+                                policy()->max_pause_time_ms());
+    VMThread::execute(&op);
+
+    // Request is trivially finished.
+    if (cause == GCCause::_g1_periodic_collection) {
+      LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, op.gc_succeeded());
+      return op.gc_succeeded();
+    }
+
+    // Lock to get consistent set of values.
+    uint old_marking_started_after;
+    uint old_marking_completed_after;
     {
       MutexLocker ml(Heap_lock);
-
-      // Read the GC count while holding the Heap_lock
-      gc_count_before = total_collections();
-      full_gc_count_before = total_full_collections();
-      old_marking_count_before = _old_marking_cycles_started;
+      // Update gc_counter for retrying VMOp if needed. Captured here to be
+      // consistent with the values we use below for termination tests.  If
+      // a retry is needed after a possible wait, and another collection
+      // occurs in the meantime, it will cause our retry to be skipped and
+      // we'll recheck for termination with updated conditions from that
+      // more recent collection.  That's what we want, rather than having
+      // our retry possibly perform an unnecessary collection.
+      gc_counter = total_collections();
+      old_marking_started_after = _old_marking_cycles_started;
+      old_marking_completed_after = _old_marking_cycles_completed;
     }
 
-    if (should_do_concurrent_full_gc(cause)) {
-      // Schedule an initial-mark evacuation pause that will start a
-      // concurrent cycle. We're setting word_size to 0 which means that
-      // we are not requesting a post-GC allocation.
-      VM_G1CollectForAllocation op(0,     /* word_size */
-                                   gc_count_before,
-                                   cause,
-                                   true,  /* should_initiate_conc_mark */
-                                   policy()->max_pause_time_ms());
-      VMThread::execute(&op);
-      gc_succeeded = op.gc_succeeded();
-      if (!gc_succeeded && retry_on_gc_failure) {
-        if (old_marking_count_before == _old_marking_cycles_started) {
-          should_retry_gc = op.should_retry_gc();
-        } else {
-          // A Full GC happened while we were trying to schedule the
-          // concurrent cycle. No point in starting a new cycle given
-          // that the whole heap was collected anyway.
+    if (!GCCause::is_user_requested_gc(cause)) {
+      // For an "automatic" (not user-requested) collection, we just need to
+      // ensure that progress is made.
+      //
+      // Request is finished if any of
+      // (1) the VMOp successfully performed a GC,
+      // (2) a concurrent cycle was already in progress,
+      // (3) a new cycle was started (by this thread or some other), or
+      // (4) a Full GC was performed.
+      // Cases (3) and (4) are detected together by a change to
+      // _old_marking_cycles_started.
+      //
+      // Note that (1) does not imply (3).  If we're still in the mixed
+      // phase of an earlier concurrent collection, the request to make the
+      // collection an initial-mark won't be honored.  If we don't check for
+      // both conditions we'll spin doing back-to-back collections.
+      if (op.gc_succeeded() ||
+          op.cycle_already_in_progress() ||
+          (old_marking_started_before != old_marking_started_after)) {
+        LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
+        return true;
+      }
+    } else {                    // User-requested GC.
+      // For a user-requested collection, we want to ensure that a complete
+      // full collection has been performed before returning, but without
+      // waiting for more than needed.
+
+      // For user-requested GCs (unlike non-UR), a successful VMOp implies a
+      // new cycle was started.  That's good, because it's not clear what we
+      // should do otherwise.  Trying again just does back-to-back GCs.
+      // Can't wait for someone else to start a cycle.  And returning fails
+      // to meet the goal of ensuring a full collection was performed.
+      assert(!op.gc_succeeded() ||
+             (old_marking_started_before != old_marking_started_after),
+             "invariant: succeeded %s, started before %u, started after %u",
+             BOOL_TO_STR(op.gc_succeeded()),
+             old_marking_started_before, old_marking_started_after);
+
+      // Request is finished if a full collection (concurrent or stw)
+      // was started after this request and has completed, e.g.
+      // started_before < completed_after.
+      if (gc_counter_less_than(old_marking_started_before,
+                               old_marking_completed_after)) {
+        LOG_COLLECT_CONCURRENTLY_COMPLETE(cause, true);
+        return true;
+      }
+
+      if (old_marking_started_after != old_marking_completed_after) {
+        // If there is an in-progress cycle (possibly started by us), then
+        // wait for that cycle to complete, e.g.
+        // while completed_now < started_after.
+        LOG_COLLECT_CONCURRENTLY(cause, "wait");
+        MonitorLocker ml(G1OldGCCount_lock);
+        while (gc_counter_less_than(_old_marking_cycles_completed,
+                                    old_marking_started_after)) {
+          ml.wait();
         }
-
-        if (should_retry_gc && GCLocker::is_active_and_needs_gc()) {
-          GCLocker::stall_until_clear();
+        // Request is finished if the collection we just waited for was
+        // started after this request.
+        if (old_marking_started_before != old_marking_started_after) {
+          LOG_COLLECT_CONCURRENTLY(cause, "complete after wait");
+          return true;
         }
       }
-    } else if (GCLocker::should_discard(cause, gc_count_before)) {
-      // Return false to be consistent with VMOp failure due to
-      // another collection slipping in after our gc_count but before
-      // our request is processed.  _gc_locker collections upgraded by
-      // GCLockerInvokesConcurrent are handled above and never discarded.
-      return false;
-    } else {
-      if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
-          DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
-
-        // Schedule a standard evacuation pause. We're setting word_size
-        // to 0 which means that we are not requesting a post-GC allocation.
-        VM_G1CollectForAllocation op(0,     /* word_size */
-                                     gc_count_before,
-                                     cause,
-                                     false, /* should_initiate_conc_mark */
-                                     policy()->max_pause_time_ms());
-        VMThread::execute(&op);
-        gc_succeeded = op.gc_succeeded();
-      } else {
-        // Schedule a Full GC.
-        VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
-        VMThread::execute(&op);
-        gc_succeeded = op.gc_succeeded();
+
+      // If VMOp was successful then it started a new cycle that the above
+      // wait, etc. should have recognized as finishing this request.  This
+      // differs from a non-user-request, where gc_succeeded does not imply
+      // a new cycle was started.
+      assert(!op.gc_succeeded(), "invariant");
+
+      // If VMOp failed because a cycle was already in progress, it is now
+      // complete.  But it didn't finish this user-requested GC, so try
+      // again.
+      if (op.cycle_already_in_progress()) {
+        LOG_COLLECT_CONCURRENTLY(cause, "retry after in-progress");
+        continue;
       }
     }
-  } while (should_retry_gc);
-  return gc_succeeded;
+
+    // Collection failed and should be retried.
+    assert(op.transient_failure(), "invariant");
+
+    // If GCLocker is active, wait until clear before retrying.
+    if (GCLocker::is_active_and_needs_gc()) {
+      LOG_COLLECT_CONCURRENTLY(cause, "gc-locker stall");
+      GCLocker::stall_until_clear();
+    }
+
+    LOG_COLLECT_CONCURRENTLY(cause, "retry");
+  }
+}
+
+bool G1CollectedHeap::try_collect(GCCause::Cause cause) {
+  assert_heap_not_locked();
+
+  // Lock to get consistent set of values.
+  uint gc_count_before;
+  uint full_gc_count_before;
+  uint old_marking_started_before;
+  {
+    MutexLocker ml(Heap_lock);
+    gc_count_before = total_collections();
+    full_gc_count_before = total_full_collections();
+    old_marking_started_before = _old_marking_cycles_started;
+  }
+
+  if (should_do_concurrent_full_gc(cause)) {
+    return try_collect_concurrently(cause,
+                                    gc_count_before,
+                                    old_marking_started_before);
+  } else if (GCLocker::should_discard(cause, gc_count_before)) {
+    // Indicate failure to be consistent with VMOp failure due to
+    // another collection slipping in after our gc_count but before
+    // our request is processed.  _gc_locker collections upgraded by
+    // GCLockerInvokesConcurrent are handled above and never discarded.
+    return false;
+  } else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
+             DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
+
+    // Schedule a standard evacuation pause. We're setting word_size
+    // to 0 which means that we are not requesting a post-GC allocation.
+    VM_G1CollectForAllocation op(0,     /* word_size */
+                                 gc_count_before,
+                                 cause,
+                                 policy()->max_pause_time_ms());
+    VMThread::execute(&op);
+    return op.gc_succeeded();
+  } else {
+    // Schedule a Full GC.
+    VM_G1CollectFull op(gc_count_before, full_gc_count_before, cause);
+    VMThread::execute(&op);
+    return op.gc_succeeded();
+  }
 }
 
 bool G1CollectedHeap::is_in(const void* p) const {
@@ -2368,6 +2509,15 @@
   st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
             (size_t) survivor_regions * HeapRegion::GrainBytes / K);
   st->cr();
+  if (_numa->is_enabled()) {
+    uint num_nodes = _numa->num_active_nodes();
+    st->print("  remaining free region(s) on each NUMA node: ");
+    const int* node_ids = _numa->node_ids();
+    for (uint node_index = 0; node_index < num_nodes; node_index++) {
+      st->print("%d=%u ", node_ids[node_index], _hrm->num_free_regions(node_index));
+    }
+    st->cr();
+  }
   MetaspaceUtils::print_on(st);
 }
 
@@ -2557,6 +2707,20 @@
   // We have just completed a GC. Update the soft reference
   // policy with the new heap occupancy
   Universe::update_heap_info_at_gc();
+
+  // Print NUMA statistics.
+  _numa->print_statistics();
+}
+
+void G1CollectedHeap::verify_numa_regions(const char* desc) {
+  LogTarget(Trace, gc, heap, verify) lt;
+
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    // Iterate all heap regions to log whether each region's preferred NUMA node matches its actual node.
+    G1NodeIndexCheckClosure cl(desc, _numa, &ls);
+    heap_region_iterate(&cl);
+  }
 }
 
 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
@@ -2567,7 +2731,6 @@
   VM_G1CollectForAllocation op(word_size,
                                gc_count_before,
                                gc_cause,
-                               false, /* should_initiate_conc_mark */
                                policy()->max_pause_time_ms());
   VMThread::execute(&op);
 
@@ -2866,6 +3029,7 @@
   }
   _verifier->verify_before_gc(type);
   _verifier->check_bitmaps("GC Start");
+  verify_numa_regions("GC Start");
 }
 
 void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
@@ -2876,6 +3040,7 @@
   }
   _verifier->verify_after_gc(type);
   _verifier->check_bitmaps("GC End");
+  verify_numa_regions("GC End");
 }
 
 void G1CollectedHeap::expand_heap_after_young_collection(){
@@ -3005,7 +3170,7 @@
 
         // Forget the current allocation region (we might even choose it to be part
         // of the collection set!).
-        _allocator->release_mutator_alloc_region();
+        _allocator->release_mutator_alloc_regions();
 
         calculate_collection_set(evacuation_info, target_pause_time_ms);
 
@@ -3042,7 +3207,7 @@
 
         allocate_dummy_regions();
 
-        _allocator->init_mutator_alloc_region();
+        _allocator->init_mutator_alloc_regions();
 
         expand_heap_after_young_collection();
 
@@ -4538,13 +4703,15 @@
 // Methods for the mutator alloc region
 
 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
-                                                      bool force) {
+                                                      bool force,
+                                                      uint node_index) {
   assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
   bool should_allocate = policy()->should_allocate_mutator_region();
   if (force || should_allocate) {
     HeapRegion* new_alloc_region = new_region(word_size,
                                               HeapRegionType::Eden,
-                                              false /* do_expand */);
+                                              false /* do_expand */,
+                                              node_index);
     if (new_alloc_region != NULL) {
       set_region_short_lived_locked(new_alloc_region);
       _hr_printer.alloc(new_alloc_region, !should_allocate);
@@ -4582,7 +4749,7 @@
   }
 }
 
-HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest) {
+HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");
 
   if (!has_more_regions(dest)) {
@@ -4598,7 +4765,8 @@
 
   HeapRegion* new_alloc_region = new_region(word_size,
                                             type,
-                                            true /* do_expand */);
+                                            true /* do_expand */,
+                                            node_index);
 
   if (new_alloc_region != NULL) {
     if (type.is_survivor()) {
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -41,6 +41,7 @@
 #include "gc/g1/g1HRPrinter.hpp"
 #include "gc/g1/g1HeapRegionAttr.hpp"
 #include "gc/g1/g1MonitoringSupport.hpp"
+#include "gc/g1/g1NUMA.hpp"
 #include "gc/g1/g1RedirtyCardsQueue.hpp"
 #include "gc/g1/g1SurvivorRegions.hpp"
 #include "gc/g1/g1YCTypes.hpp"
@@ -132,6 +133,7 @@
   friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
+  friend class VM_G1TryInitiateConcMark;
   friend class VMStructs;
   friend class MutatorAllocRegion;
   friend class G1FullCollector;
@@ -191,6 +193,9 @@
   // Callback for region mapping changed events.
   G1RegionMappingChangedListener _listener;
 
+  // Handle G1 NUMA support.
+  G1NUMA* _numa;
+
   // The sequence of all heap regions in the heap.
   HeapRegionManager* _hrm;
 
@@ -255,16 +260,22 @@
 
   G1HRPrinter _hr_printer;
 
-  // It decides whether an explicit GC should start a concurrent cycle
-  // instead of doing a STW GC. Currently, a concurrent cycle is
-  // explicitly started if:
-  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
-  // (b) cause == _g1_humongous_allocation
-  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
-  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent.
-  // (e) cause == _wb_conc_mark
+  // Return true if an explicit GC should start a concurrent cycle instead
+  // of doing a STW full GC. A concurrent cycle should be started if:
+  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
+  // (b) cause == _g1_humongous_allocation,
+  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
+  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
+  // (e) cause == _wb_conc_mark,
+  // (f) cause == _g1_periodic_collection and +G1PeriodicGCInvokesConcurrent.
   bool should_do_concurrent_full_gc(GCCause::Cause cause);
 
+  // Attempt to start a concurrent cycle with the indicated cause.
+  // precondition: should_do_concurrent_full_gc(cause)
+  bool try_collect_concurrently(GCCause::Cause cause,
+                                uint gc_counter,
+                                uint old_marking_started_before);
+
   // Return true if should upgrade to full gc after an incremental one.
   bool should_upgrade_to_full_gc(GCCause::Cause cause);
 
@@ -387,7 +398,10 @@
   // attempt to expand the heap if necessary to satisfy the allocation
   // request. 'type' takes the type of region to be allocated. (Use constants
   // Old, Eden, Humongous, Survivor defined in HeapRegionType.)
-  HeapRegion* new_region(size_t word_size, HeapRegionType type, bool do_expand);
+  HeapRegion* new_region(size_t word_size,
+                         HeapRegionType type,
+                         bool do_expand,
+                         uint node_index = G1NUMA::AnyNodeIndex);
 
   // Initialize a contiguous set of free regions of length num_regions
   // and starting at index first so that they appear as a single
@@ -462,13 +476,13 @@
   // These methods are the "callbacks" from the G1AllocRegion class.
 
   // For mutator alloc regions.
-  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
+  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force, uint node_index);
   void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes);
 
   // For GC alloc regions.
   bool has_more_regions(G1HeapRegionAttr dest);
-  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest);
+  HeapRegion* new_gc_alloc_region(size_t word_size, G1HeapRegionAttr dest, uint node_index);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
                               size_t allocated_bytes, G1HeapRegionAttr dest);
 
@@ -523,6 +537,9 @@
   // Merges the information gathered on a per-thread basis for all worker threads
   // during GC into global variables.
   void merge_per_thread_state_info(G1ParScanThreadStateSet* per_thread_states);
+
+  void verify_numa_regions(const char* desc);
+
 public:
   G1YoungRemSetSamplingThread* sampling_thread() const { return _young_gen_sampling_thread; }
 
@@ -547,11 +564,14 @@
 
   void resize_heap_if_necessary();
 
+  G1NUMA* numa() const { return _numa; }
+
   // Expand the garbage-first heap by at least the given size (in bytes!).
   // Returns true if the heap was expanded by the requested amount;
   // false otherwise.
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes, WorkGang* pretouch_workers = NULL, double* expand_time_ms = NULL);
+  bool expand_single_region(uint node_index);
 
   // Returns the PLAB statistics for a given destination.
   inline G1EvacStats* alloc_buffer_stats(G1HeapRegionAttr dest);
@@ -617,7 +637,7 @@
   // Full GC). If concurrent is true, the caller is the outer caller
   // in this nesting (i.e., the concurrent cycle). Further nesting is
   // not currently supported. The end of this call also notifies
-  // the FullGCCount_lock in case a Java thread is waiting for a full
+  // the G1OldGCCount_lock in case a Java thread is waiting for a full
   // GC to happen (e.g., it called System.gc() with
   // +ExplicitGCInvokesConcurrent).
   void increment_old_marking_cycles_completed(bool concurrent);
@@ -1075,10 +1095,9 @@
   // "CollectedHeap" supports.
   virtual void collect(GCCause::Cause cause);
 
-  // Perform a collection of the heap with the given cause; if the VM operation
-  // fails to execute for any reason, retry only if retry_on_gc_failure is set.
+  // Perform a collection of the heap with the given cause.
   // Returns whether this collection actually executed.
-  bool try_collect(GCCause::Cause cause, bool retry_on_gc_failure);
+  bool try_collect(GCCause::Cause cause);
 
   // True iff an evacuation has failed in the most-recent collection.
   bool evacuation_failed() { return _evacuation_failed; }
@@ -1274,7 +1293,9 @@
   const G1SurvivorRegions* survivor() const { return &_survivor; }
 
   uint eden_regions_count() const { return _eden.length(); }
+  uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
   uint survivor_regions_count() const { return _survivor.length(); }
+  uint survivor_regions_count(uint node_index) const { return _survivor.regions_on_node(node_index); }
   size_t eden_regions_used_bytes() const { return _eden.used_bytes(); }
   size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
   uint young_regions_count() const { return _eden.length() + _survivor.length(); }
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -393,7 +393,7 @@
     }
 
     // Update the number of full collections that have been
-    // completed. This will also notify the FullGCCount_lock in case a
+    // completed. This will also notify the G1OldGCCount_lock in case a
     // Java thread is waiting for a full GC to happen (e.g., it
     // called System.gc() with +ExplicitGCInvokesConcurrent).
     {
--- a/src/hotspot/share/gc/g1/g1EdenRegions.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1EdenRegions.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_G1_G1EDENREGIONS_HPP
 #define SHARE_GC_G1_G1EDENREGIONS_HPP
 
+#include "gc/g1/g1RegionsOnNodes.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "runtime/globals.hpp"
 #include "utilities/debug.hpp"
@@ -35,18 +36,25 @@
   // Sum of used bytes from all retired eden regions.
   // I.e. updated when mutator regions are retired.
   volatile size_t _used_bytes;
+  G1RegionsOnNodes  _regions_on_node;
 
 public:
-  G1EdenRegions() : _length(0), _used_bytes(0) { }
+  G1EdenRegions() : _length(0), _used_bytes(0), _regions_on_node() { }
 
-  void add(HeapRegion* hr) {
+  virtual uint add(HeapRegion* hr) {
     assert(!hr->is_eden(), "should not already be set");
     _length++;
+    return _regions_on_node.add(hr);
   }
 
-  void clear() { _length = 0; _used_bytes = 0; }
+  void clear() {
+    _length = 0;
+    _used_bytes = 0;
+    _regions_on_node.clear();
+  }
 
   uint length() const { return _length; }
+  uint regions_on_node(uint node_index) const { return _regions_on_node.count(node_index); }
 
   size_t used_bytes() const { return _used_bytes; }
 
--- a/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -26,15 +26,38 @@
 #include "gc/g1/g1CollectedHeap.hpp"
 #include "gc/g1/g1HeapTransition.hpp"
 #include "gc/g1/g1Policy.hpp"
-#include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/metaspace.hpp"
 
-G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) {
-  _eden_length = g1_heap->eden_regions_count();
-  _survivor_length = g1_heap->survivor_regions_count();
-  _old_length = g1_heap->old_regions_count();
-  _archive_length = g1_heap->archive_regions_count();
-  _humongous_length = g1_heap->humongous_regions_count();
+G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) :
+  _eden_length(g1_heap->eden_regions_count()),
+  _survivor_length(g1_heap->survivor_regions_count()),
+  _old_length(g1_heap->old_regions_count()),
+  _archive_length(g1_heap->archive_regions_count()),
+  _humongous_length(g1_heap->humongous_regions_count()),
+  _eden_length_per_node(NULL),
+  _survivor_length_per_node(NULL) {
+
+  uint node_count = G1NUMA::numa()->num_active_nodes();
+
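+  // Record per-node eden/survivor lengths only when NUMA is active and
+  // gc+heap+numa logging is enabled at Debug level.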
+  if (node_count > 1) {
+    LogTarget(Debug, gc, heap, numa) lt;
+
+    if (lt.is_enabled()) {
+      _eden_length_per_node = NEW_C_HEAP_ARRAY(uint, node_count, mtGC);
+      _survivor_length_per_node = NEW_C_HEAP_ARRAY(uint, node_count, mtGC);
+
+      for (uint i = 0; i < node_count; i++) {
+        _eden_length_per_node[i] = g1_heap->eden_regions_count(i);
+        _survivor_length_per_node[i] = g1_heap->survivor_regions_count(i);
+      }
+    }
+  }
+}
+
+G1HeapTransition::Data::~Data() {
+  FREE_C_HEAP_ARRAY(uint, _eden_length_per_node);
+  FREE_C_HEAP_ARRAY(uint, _survivor_length_per_node);
 }
 
 G1HeapTransition::G1HeapTransition(G1CollectedHeap* g1_heap) : _g1_heap(g1_heap), _before(g1_heap) { }
@@ -84,6 +107,34 @@
   }
 };
 
+static void log_regions(const char* msg, size_t before_length, size_t after_length, size_t capacity,
+                        uint* before_per_node_length, uint* after_per_node_length) {
+  LogTarget(Info, gc, heap) lt;
+
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+
+    ls.print("%s regions: " SIZE_FORMAT "->" SIZE_FORMAT "("  SIZE_FORMAT ")",
+             msg, before_length, after_length, capacity);
+    // Not NULL only if gc+heap+numa at Debug level is enabled.
+    if (before_per_node_length != NULL && after_per_node_length != NULL) {
+      G1NUMA* numa = G1NUMA::numa();
+      uint num_nodes = numa->num_active_nodes();
+      const int* node_ids = numa->node_ids();
+      ls.print(" (");
+      for (uint i = 0; i < num_nodes; i++) {
+        ls.print("%d: %u->%u", node_ids[i], before_per_node_length[i], after_per_node_length[i]);
+        // Skip the separator after the last entry.
+        if (i != num_nodes - 1) {
+          ls.print(", ");
+        }
+      }
+      ls.print(")");
+    }
+    ls.print_cr("");
+  }
+}
+
 void G1HeapTransition::print() {
   Data after(_g1_heap);
 
@@ -106,12 +157,12 @@
         after._humongous_length, usage._humongous_region_count);
   }
 
-  log_info(gc, heap)("Eden regions: " SIZE_FORMAT "->" SIZE_FORMAT "("  SIZE_FORMAT ")",
-                     _before._eden_length, after._eden_length, eden_capacity_length_after_gc);
+  log_regions("Eden", _before._eden_length, after._eden_length, eden_capacity_length_after_gc,
+              _before._eden_length_per_node, after._eden_length_per_node);
   log_trace(gc, heap)(" Used: 0K, Waste: 0K");
 
-  log_info(gc, heap)("Survivor regions: " SIZE_FORMAT "->" SIZE_FORMAT "("  SIZE_FORMAT ")",
-                     _before._survivor_length, after._survivor_length, survivor_capacity_length_before_gc);
+  log_regions("Survivor", _before._survivor_length, after._survivor_length, survivor_capacity_length_before_gc,
+              _before._survivor_length_per_node, after._survivor_length_per_node);
   log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
       usage._survivor_used / K, ((after._survivor_length * HeapRegion::GrainBytes) - usage._survivor_used) / K);
 
--- a/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1HeapTransition.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -39,7 +39,13 @@
     size_t _humongous_length;
     const metaspace::MetaspaceSizesSnapshot _meta_sizes;
 
+    // Only includes current eden regions.
+    uint* _eden_length_per_node;
+    // Only includes current survivor regions.
+    uint* _survivor_length_per_node;
+
     Data(G1CollectedHeap* g1_heap);
+    ~Data();
   };
 
   G1CollectedHeap* _g1_heap;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1NUMA.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1NUMA.hpp"
+#include "logging/logStream.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/os.hpp"
+
+G1NUMA* G1NUMA::_inst = NULL;
+
+size_t G1NUMA::region_size() const {
+  assert(_region_size > 0, "Heap region size is not yet set");
+  return _region_size;
+}
+
+size_t G1NUMA::page_size() const {
+  assert(_page_size > 0, "Page size is not yet set");
+  return _page_size;
+}
+
+bool G1NUMA::is_enabled() const { return num_active_nodes() > 1; }
+
+G1NUMA* G1NUMA::create() {
+  guarantee(_inst == NULL, "Should be called once.");
+  _inst = new G1NUMA();
+
+  // NUMA only supported on Linux.
+#ifdef LINUX
+  _inst->initialize(UseNUMA);
+#else
+  _inst->initialize(false);
+#endif /* LINUX */
+
+  return _inst;
+}
+
+// Returns memory node ids.
+const int* G1NUMA::node_ids() const {
+  return _node_ids;
+}
+
+uint G1NUMA::index_of_node_id(int node_id) const {
+  assert(node_id >= 0, "invalid node id %d", node_id);
+  assert(node_id < _len_node_id_to_index_map, "invalid node id %d", node_id);
+  uint node_index = _node_id_to_index_map[node_id];
+  assert(node_index != G1NUMA::UnknownNodeIndex,
+         "invalid node id %d", node_id);
+  return node_index;
+}
+
+G1NUMA::G1NUMA() :
+  _node_id_to_index_map(NULL), _len_node_id_to_index_map(0),
+  _node_ids(NULL), _num_active_node_ids(0),
+  _region_size(0), _page_size(0), _stats(NULL) {
+}
+
+void G1NUMA::initialize_without_numa() {
+  // If NUMA is not enabled or supported, initialize as having a single node.
+  _num_active_node_ids = 1;
+  _node_ids = NEW_C_HEAP_ARRAY(int, _num_active_node_ids, mtGC);
+  _node_ids[0] = 0;
+  // Map index 0 to node 0
+  _len_node_id_to_index_map = 1;
+  _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);
+  _node_id_to_index_map[0] = 0;
+}
+
+void G1NUMA::initialize(bool use_numa) {
+  if (!use_numa) {
+    initialize_without_numa();
+    return;
+  }
+
+  assert(UseNUMA, "Invariant");
+  size_t num_node_ids = os::numa_get_groups_num();
+
+  // Create an array of active node ids.
+  _node_ids = NEW_C_HEAP_ARRAY(int, num_node_ids, mtGC);
+  _num_active_node_ids = (uint)os::numa_get_leaf_groups(_node_ids, num_node_ids);
+
+  int max_node_id = 0;
+  for (uint i = 0; i < _num_active_node_ids; i++) {
+    max_node_id = MAX2(max_node_id, _node_ids[i]);
+  }
+
+  // Create a mapping between node_id and index.
+  _len_node_id_to_index_map = max_node_id + 1;
+  _node_id_to_index_map = NEW_C_HEAP_ARRAY(uint, _len_node_id_to_index_map, mtGC);
+
+  // Set all indices with unknown node id.
+  for (int i = 0; i < _len_node_id_to_index_map; i++) {
+    _node_id_to_index_map[i] = G1NUMA::UnknownNodeIndex;
+  }
+
+  // Set the indices for the actually retrieved node ids.
+  for (uint i = 0; i < _num_active_node_ids; i++) {
+    _node_id_to_index_map[_node_ids[i]] = i;
+  }
+
+  _stats = new G1NUMAStats(_node_ids, _num_active_node_ids);
+}
+
+G1NUMA::~G1NUMA() {
+  delete _stats;
+  FREE_C_HEAP_ARRAY(int, _node_id_to_index_map);
+  FREE_C_HEAP_ARRAY(int, _node_ids);
+}
+
+void G1NUMA::set_region_info(size_t region_size, size_t page_size) {
+  _region_size = region_size;
+  _page_size = page_size;
+}
+
+uint G1NUMA::num_active_nodes() const {
+  assert(_num_active_node_ids > 0, "just checking");
+  return _num_active_node_ids;
+}
+
+uint G1NUMA::index_of_current_thread() const {
+  if (!is_enabled()) {
+    return 0;
+  }
+  return index_of_node_id(os::numa_get_group_id());
+}
+
+uint G1NUMA::preferred_node_index_for_index(uint region_index) const {
+  if (region_size() >= page_size()) {
+    // Simple case, pages are smaller than the region so we
+    // can just alternate over the nodes.
+    return region_index % _num_active_node_ids;
+  } else {
+    // Multiple regions in one page, so we need to make sure the
+    // regions within a page are preferred on the same node.
+    size_t regions_per_page = page_size() / region_size();
+    return (region_index / regions_per_page) % _num_active_node_ids;
+  }
+}
+
+int G1NUMA::numa_id(int index) const {
+  assert(index < _len_node_id_to_index_map, "Index %d out of range: [0,%d)",
+         index, _len_node_id_to_index_map);
+  return _node_ids[index];
+}
+
+uint G1NUMA::index_of_address(HeapWord *address) const {
+  int numa_id = os::numa_get_group_id_for_address((const void*)address);
+  if (numa_id == -1) {
+    return UnknownNodeIndex;
+  } else {
+    return index_of_node_id(numa_id);
+  }
+}
+
+uint G1NUMA::index_for_region(HeapRegion* hr) const {
+  if (!is_enabled()) {
+    return 0;
+  }
+
+  if (AlwaysPreTouch) {
+    // If we already pretouched, we can check actual node index here.
+    // However, if node index is still unknown, use preferred node index.
+    uint node_index = index_of_address(hr->bottom());
+    if (node_index != UnknownNodeIndex) {
+      return node_index;
+    }
+  }
+
+  return preferred_node_index_for_index(hr->hrm_index());
+}
+
+// Request to spread the given memory evenly across the available NUMA
+// nodes. Which node to request for a given address is given by the
+// region size and the page size. Below are two examples on 4 NUMA nodes system:
+//   1. G1HeapRegionSize(_region_size) is larger than or equal to page size.
+//      * Page #:       |-0--||-1--||-2--||-3--||-4--||-5--||-6--||-7--||-8--||-9--||-10-||-11-||-12-||-13-||-14-||-15-|
+//      * HeapRegion #: |----#0----||----#1----||----#2----||----#3----||----#4----||----#5----||----#6----||----#7----|
+//      * NUMA node #:  |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
+//   2. G1HeapRegionSize(_region_size) is smaller than page size.
+//      Memory will be touched one page at a time because G1RegionToSpaceMapper commits
+//      pages one by one.
+//      * Page #:       |-----0----||-----1----||-----2----||-----3----||-----4----||-----5----||-----6----||-----7----|
+//      * HeapRegion #: |-#0-||-#1-||-#2-||-#3-||-#4-||-#5-||-#6-||-#7-||-#8-||-#9-||#10-||#11-||#12-||#13-||#14-||#15-|
+//      * NUMA node #:  |----#0----||----#1----||----#2----||----#3----||----#0----||----#1----||----#2----||----#3----|
+void G1NUMA::request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index) {
+  if (!is_enabled()) {
+    return;
+  }
+
+  if (size_in_bytes == 0) {
+    return;
+  }
+
+  uint node_index = preferred_node_index_for_index(region_index);
+
+  assert(is_aligned(aligned_address, page_size()), "Given address (" PTR_FORMAT ") should be aligned.", p2i(aligned_address));
+  assert(is_aligned(size_in_bytes, page_size()), "Given size (" SIZE_FORMAT ") should be aligned.", size_in_bytes);
+
+  log_trace(gc, heap, numa)("Request memory [" PTR_FORMAT ", " PTR_FORMAT ") to be NUMA id (%d)",
+                            p2i(aligned_address), p2i((char*)aligned_address + size_in_bytes), _node_ids[node_index]);
+  os::numa_make_local((char*)aligned_address, size_in_bytes, _node_ids[node_index]);
+}
+
+uint G1NUMA::max_search_depth() const {
+  // The multiplier 3 is an arbitrary value used to limit the iterations.
+  // Note that one page may consist of multiple HeapRegions.
+  return 3 * MAX2((uint)(page_size() / region_size()), (uint)1) * num_active_nodes();
+}
+
+void G1NUMA::update_statistics(G1NUMAStats::NodeDataItems phase,
+                               uint requested_node_index,
+                               uint allocated_node_index) {
+  if (_stats == NULL) {
+    return;
+  }
+
+  uint converted_req_index;
+  if (requested_node_index < _num_active_node_ids) {
+    converted_req_index = requested_node_index;
+  } else {
+    assert(requested_node_index == AnyNodeIndex,
+           "Requested node index %u should be AnyNodeIndex.", requested_node_index);
+    converted_req_index = _num_active_node_ids;
+  }
+  _stats->update(phase, converted_req_index, allocated_node_index);
+}
+
+void G1NUMA::copy_statistics(G1NUMAStats::NodeDataItems phase,
+                             uint requested_node_index,
+                             size_t* allocated_stat) {
+  if (_stats == NULL) {
+    return;
+  }
+
+  _stats->copy(phase, requested_node_index, allocated_stat);
+}
+
+void G1NUMA::print_statistics() const {
+  if (_stats == NULL) {
+    return;
+  }
+
+  _stats->print_statistics();
+}
+
+G1NodeIndexCheckClosure::G1NodeIndexCheckClosure(const char* desc, G1NUMA* numa, LogStream* ls) :
+  _desc(desc), _numa(numa), _ls(ls) {
+
+  uint num_nodes = _numa->num_active_nodes();
+  _matched = NEW_C_HEAP_ARRAY(uint, num_nodes, mtGC);
+  _mismatched = NEW_C_HEAP_ARRAY(uint, num_nodes, mtGC);
+  _total = NEW_C_HEAP_ARRAY(uint, num_nodes, mtGC);
+  memset(_matched, 0, sizeof(uint) * num_nodes);
+  memset(_mismatched, 0, sizeof(uint) * num_nodes);
+  memset(_total, 0, sizeof(uint) * num_nodes);
+}
+
+G1NodeIndexCheckClosure::~G1NodeIndexCheckClosure() {
+  _ls->print("%s: NUMA region verification (id: matched/mismatched/total): ", _desc);
+  const int* numa_ids = _numa->node_ids();
+  for (uint i = 0; i < _numa->num_active_nodes(); i++) {
+    _ls->print("%d: %u/%u/%u ", numa_ids[i], _matched[i], _mismatched[i], _total[i]);
+  }
+
+  FREE_C_HEAP_ARRAY(uint, _matched);
+  FREE_C_HEAP_ARRAY(uint, _mismatched);
+  FREE_C_HEAP_ARRAY(uint, _total);
+}
+
+bool G1NodeIndexCheckClosure::do_heap_region(HeapRegion* hr) {
+  // The preferred node index is always a valid node index.
+  uint preferred_node_index = _numa->preferred_node_index_for_index(hr->hrm_index());
+  // The active node index may be UnknownNodeIndex.
+  uint active_node_index = _numa->index_of_address(hr->bottom());
+
+  if (preferred_node_index == active_node_index) {
+    _matched[preferred_node_index]++;
+  } else if (active_node_index != G1NUMA::UnknownNodeIndex) {
+    _mismatched[preferred_node_index]++;
+  }
+  _total[preferred_node_index]++;
+
+  return false;
+}
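
The id-to-index mapping built in G1NUMA::initialize() is the core data structure of this file: the active node ids may be sparse (e.g. {0, 2, 3}), so a dense lookup table with a sentinel entry is used. A minimal standalone sketch, in plain C++ with illustrative names:

#include <algorithm>
#include <cstdio>
#include <vector>

static const unsigned UnknownNodeIndex = ~0u;

// Given the active node ids (possibly sparse), build a dense table so that
// table[id] yields a 0-based node index, or the sentinel for unknown ids.
static std::vector<unsigned> build_id_to_index(const std::vector<int>& node_ids) {
  int max_id = *std::max_element(node_ids.begin(), node_ids.end());
  std::vector<unsigned> table(max_id + 1, UnknownNodeIndex);
  for (size_t i = 0; i < node_ids.size(); i++) {
    table[node_ids[i]] = (unsigned)i;
  }
  return table;
}

int main() {
  std::vector<int> ids = {0, 2, 3};  // sparse active node ids
  std::vector<unsigned> table = build_id_to_index(ids);
  std::printf("id 2 -> index %u\n", table[2]);  // prints: id 2 -> index 1
  std::printf("id 1 known? %s\n", table[1] == UnknownNodeIndex ? "no" : "yes");
  return 0;
}
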
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1NUMA.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_NUMA_HPP
+#define SHARE_VM_GC_G1_NUMA_HPP
+
+#include "gc/g1/g1NUMAStats.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/os.hpp"
+
+class LogStream;
+
+class G1NUMA: public CHeapObj<mtGC> {
+  // Mapping of available node ids to a 0-based index which can be used for
+  // fast resource management. I.e. every node id is mapped to a unique value
+  // in the range [0, {# of nodes - 1}].
+  // For invalid node id, return UnknownNodeIndex.
+  uint* _node_id_to_index_map;
+  // Length of _node_id_to_index_map.
+  int _len_node_id_to_index_map;
+
+  // Current active node ids.
+  int* _node_ids;
+  // Total number of node ids.
+  uint _num_active_node_ids;
+
+  // HeapRegion size
+  size_t _region_size;
+  // Necessary when touching memory.
+  size_t _page_size;
+
+  // Stores statistic data.
+  G1NUMAStats* _stats;
+
+  size_t region_size() const;
+  size_t page_size() const;
+
+  // Returns node index of the given node id.
+  // Precondition: node_id is an active node id.
+  inline uint index_of_node_id(int node_id) const;
+
+  // Creates node id and node index mapping table of _node_id_to_index_map.
+  void init_node_id_to_index_map(const int* node_ids, uint num_node_ids);
+
+  static G1NUMA* _inst;
+
+  G1NUMA();
+  void initialize(bool use_numa);
+  void initialize_without_numa();
+
+public:
+  static const uint UnknownNodeIndex = UINT_MAX;
+  static const uint AnyNodeIndex = UnknownNodeIndex - 1;
+
+  static G1NUMA* numa() { return _inst; }
+
+  static G1NUMA* create();
+
+  ~G1NUMA();
+
+  // Sets heap region size and page size after those values
+  // are determined at G1CollectedHeap::initialize().
+  void set_region_info(size_t region_size, size_t page_size);
+
+  // Returns active memory node count.
+  uint num_active_nodes() const;
+
+  bool is_enabled() const;
+
+  int numa_id(int index) const;
+
+  // Returns memory node ids
+  const int* node_ids() const;
+
+  // Returns node index of current calling thread.
+  uint index_of_current_thread() const;
+
+  // Returns the preferred index for the given HeapRegion index.
+  // This assumes that HeapRegions are evenly split, so the preferred index
+  // can be derived from the given HeapRegion index.
+  // Result is less than num_active_nodes().
+  uint preferred_node_index_for_index(uint region_index) const;
+
+  // Retrieves node index of the given address.
+  // Result is less than num_active_nodes() or is UnknownNodeIndex.
+  // Precondition: address is in reserved range for heap.
+  uint index_of_address(HeapWord* address) const;
+
+  // If AlwaysPreTouch is enabled, return actual node index via system call.
+  // If disabled, return preferred node index of the given heap region.
+  uint index_for_region(HeapRegion* hr) const;
+
+  // Requests the given memory area to be located at the given node index.
+  void request_memory_on_node(void* aligned_address, size_t size_in_bytes, uint region_index);
+
+  // Returns maximum search depth which is used to limit heap region search iterations.
+  // The number of active nodes, page size and heap region size are considered.
+  uint max_search_depth() const;
+
+  // Update the given phase of requested and allocated node index.
+  void update_statistics(G1NUMAStats::NodeDataItems phase, uint requested_node_index, uint allocated_node_index);
+
+  // Copy all allocated statistics of the given phase and requested node.
+  // Precondition: allocated_stat should have same length of active nodes.
+  void copy_statistics(G1NUMAStats::NodeDataItems phase, uint requested_node_index, size_t* allocated_stat);
+
+  // Print all statistics.
+  void print_statistics() const;
+};
+
+class G1NodeIndexCheckClosure : public HeapRegionClosure {
+  const char* _desc;
+  G1NUMA* _numa;
+  // Records matched count of each node.
+  uint* _matched;
+  // Records mismatched count of each node.
+  uint* _mismatched;
+  // Records total count of each node.
+  // Total = matched + mismatched + unknown.
+  uint* _total;
+  LogStream* _ls;
+
+public:
+  G1NodeIndexCheckClosure(const char* desc, G1NUMA* numa, LogStream* ls);
+  ~G1NodeIndexCheckClosure();
+
+  bool do_heap_region(HeapRegion* hr);
+};
+
+#endif // SHARE_VM_GC_G1_NUMA_HPP
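
The arithmetic behind preferred_node_index_for_index() is worth a worked example: when regions are at least page-sized, region indices simply round-robin over the nodes; when one page holds several regions, all regions in that page must map to the same node. A standalone sketch under those assumptions (plain C++, illustrative names):

#include <cassert>
#include <cstdio>

static unsigned preferred_node_index(unsigned region_index, size_t region_size,
                                     size_t page_size, unsigned num_nodes) {
  if (region_size >= page_size) {
    // Case 1: each region spans whole pages; alternate regions over nodes.
    return region_index % num_nodes;
  }
  // Case 2: several regions share one page; keep them on the same node.
  size_t regions_per_page = page_size / region_size;
  return (region_index / regions_per_page) % num_nodes;
}

int main() {
  // 4 nodes, 2 MB regions, 1 GB pages: 512 regions per page share a node.
  assert(preferred_node_index(0,   2u << 20, 1u << 30, 4) == 0);
  assert(preferred_node_index(511, 2u << 20, 1u << 30, 4) == 0);
  assert(preferred_node_index(512, 2u << 20, 1u << 30, 4) == 1);
  // 4 nodes, 32 MB regions, 4 KB pages: regions simply round-robin.
  assert(preferred_node_index(5, 32u << 20, 4096, 4) == 1);
  std::printf("all checks passed\n");
  return 0;
}
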
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1NUMAStats.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1NUMAStats.hpp"
+#include "logging/logStream.hpp"
+
+double G1NUMAStats::Stat::rate() const {
+  return _requested == 0 ? 0 : (double)_hit / _requested * 100;
+}
+
+G1NUMAStats::NodeDataArray::NodeDataArray(uint num_nodes) {
+  guarantee(num_nodes > 1, "Number of nodes (%u) should be larger than 1", num_nodes);
+
+  // The row represents the number of nodes.
+  _num_column = num_nodes;
+  // +1 for G1NUMA::AnyNodeIndex.
+  _num_row = num_nodes + 1;
+
+  _data = NEW_C_HEAP_ARRAY(size_t*, _num_row, mtGC);
+  for (uint row = 0; row < _num_row; row++) {
+    _data[row] = NEW_C_HEAP_ARRAY(size_t, _num_column, mtGC);
+  }
+
+  clear();
+}
+
+G1NUMAStats::NodeDataArray::~NodeDataArray() {
+  for (uint row = 0; row < _num_row; row++) {
+    FREE_C_HEAP_ARRAY(size_t, _data[row]);
+  }
+  FREE_C_HEAP_ARRAY(size_t*, _data);
+}
+
+void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result) const {
+  size_t requested = 0;
+  size_t hit = 0;
+
+  for (size_t row = 0; row < _num_row; row++) {
+    for (size_t column = 0; column < _num_column; column++) {
+      requested += _data[row][column];
+      if (row == column) {
+        hit += _data[row][column];
+      }
+    }
+  }
+
+  assert(result != NULL, "Invariant");
+  result->_hit = hit;
+  result->_requested = requested;
+}
+
+void G1NUMAStats::NodeDataArray::create_hit_rate(Stat* result, uint req_index) const {
+  size_t requested = 0;
+  size_t hit = _data[req_index][req_index];
+
+  for (size_t column = 0; column < _num_column; column++) {
+    requested += _data[req_index][column];
+  }
+
+  assert(result != NULL, "Invariant");
+  result->_hit = hit;
+  result->_requested = requested;
+}
+
+size_t G1NUMAStats::NodeDataArray::sum(uint req_index) const {
+  size_t sum = 0;
+  for (size_t column = 0; column < _num_column; column++) {
+    sum += _data[req_index][column];
+  }
+
+  return sum;
+}
+
+void G1NUMAStats::NodeDataArray::increase(uint req_index, uint alloc_index) {
+  assert(req_index < _num_row,
+         "Requested index %u should be less than the row size %u",
+         req_index, _num_row);
+  assert(alloc_index < _num_column,
+         "Allocated index %u should be less than the column size %u",
+         alloc_index, _num_column);
+  _data[req_index][alloc_index] += 1;
+}
+
+void G1NUMAStats::NodeDataArray::clear() {
+  for (uint row = 0; row < _num_row; row++) {
+    memset((void*)_data[row], 0, sizeof(size_t) * _num_column);
+  }
+}
+
+size_t G1NUMAStats::NodeDataArray::get(uint req_index, uint alloc_index) {
+  return _data[req_index][alloc_index];
+}
+
+void G1NUMAStats::NodeDataArray::copy(uint req_index, size_t* stat) {
+  assert(stat != NULL, "Invariant");
+
+  for (uint column = 0; column < _num_column; column++) {
+    _data[req_index][column] += stat[column];
+  }
+}
+
+G1NUMAStats::G1NUMAStats(const int* node_ids, uint num_node_ids) :
+  _node_ids(node_ids), _num_node_ids(num_node_ids), _node_data() {
+
+  assert(_num_node_ids > 1, "Should have more than one active memory node: %u", _num_node_ids);
+
+  for (int i = 0; i < NodeDataItemsSentinel; i++) {
+    _node_data[i] = new NodeDataArray(_num_node_ids);
+  }
+}
+
+G1NUMAStats::~G1NUMAStats() {
+  for (int i = 0; i < NodeDataItemsSentinel; i++) {
+    delete _node_data[i];
+  }
+}
+
+void G1NUMAStats::clear(G1NUMAStats::NodeDataItems phase) {
+  _node_data[phase]->clear();
+}
+
+void G1NUMAStats::update(G1NUMAStats::NodeDataItems phase,
+                         uint requested_node_index,
+                         uint allocated_node_index) {
+  _node_data[phase]->increase(requested_node_index, allocated_node_index);
+}
+
+void G1NUMAStats::copy(G1NUMAStats::NodeDataItems phase,
+                       uint requested_node_index,
+                       size_t* allocated_stat) {
+  _node_data[phase]->copy(requested_node_index, allocated_stat);
+}
+
+static const char* phase_to_explanatory_string(G1NUMAStats::NodeDataItems phase) {
+  switch(phase) {
+    case G1NUMAStats::NewRegionAlloc:
+      return "Placement match ratio";
+    case G1NUMAStats::LocalObjProcessAtCopyToSurv:
+      return "Worker task locality match ratio";
+    default:
+      return "";
+  }
+}
+
+#define RATE_TOTAL_FORMAT "%0.0f%% " SIZE_FORMAT "/" SIZE_FORMAT
+
+void G1NUMAStats::print_info(G1NUMAStats::NodeDataItems phase) {
+  LogTarget(Info, gc, heap, numa) lt;
+
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    Stat result;
+    size_t array_width = _num_node_ids;
+
+    _node_data[phase]->create_hit_rate(&result);
+
+    ls.print("%s: " RATE_TOTAL_FORMAT " (",
+             phase_to_explanatory_string(phase), result.rate(), result._hit, result._requested);
+
+    for (uint i = 0; i < array_width; i++) {
+      if (i != 0) {
+        ls.print(", ");
+      }
+      _node_data[phase]->create_hit_rate(&result, i);
+      ls.print("%d: " RATE_TOTAL_FORMAT,
+               _node_ids[i], result.rate(), result._hit, result._requested);
+    }
+    ls.print_cr(")");
+  }
+}
+
+void G1NUMAStats::print_mutator_alloc_stat_debug() {
+  LogTarget(Debug, gc, heap, numa) lt;
+
+  if (lt.is_enabled()) {
+    LogStream ls(lt);
+    uint array_width = _num_node_ids;
+
+    ls.print("Allocated NUMA ids    ");
+    for (uint i = 0; i < array_width; i++) {
+      ls.print("%8d", _node_ids[i]);
+    }
+    ls.print_cr("   Total");
+
+    ls.print("Requested NUMA id ");
+    for (uint req = 0; req < array_width; req++) {
+      ls.print("%3d ", _node_ids[req]);
+      for (uint alloc = 0; alloc < array_width; alloc++) {
+        ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->get(req, alloc));
+      }
+      ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->sum(req));
+      ls.print_cr("");
+      // Add padding to align with the string 'Requested NUMA id'.
+      ls.print("                  ");
+    }
+    ls.print("Any ");
+    for (uint alloc = 0; alloc < array_width; alloc++) {
+      ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->get(array_width, alloc));
+    }
+    ls.print(SIZE_FORMAT_W(8), _node_data[NewRegionAlloc]->sum(array_width));
+    ls.print_cr("");
+  }
+}
+
+void G1NUMAStats::print_statistics() {
+  print_info(NewRegionAlloc);
+  print_mutator_alloc_stat_debug();
+
+  print_info(LocalObjProcessAtCopyToSurv);
+}
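
The NodeDataArray above is a (node count + 1) x (node count) counting matrix whose diagonal holds the requests satisfied on the requested node. A minimal standalone sketch of the matrix and the overall hit-rate computation (plain C++, illustrative names; the extra row models the any-node request, which can never count as a hit):

#include <cstdio>
#include <vector>

struct NodeMatrix {
  unsigned nodes;
  // rows = nodes + 1 (the last row is the "any node" request), columns = nodes.
  std::vector<std::vector<size_t>> data;

  explicit NodeMatrix(unsigned n) : nodes(n), data(n + 1, std::vector<size_t>(n, 0)) {}

  void increase(unsigned req, unsigned alloc) { data[req][alloc] += 1; }

  // Overall hit rate: a diagonal entry is a request satisfied on the
  // requested node; the any-node row has no diagonal entry.
  double hit_rate() const {
    size_t requested = 0, hit = 0;
    for (size_t r = 0; r < data.size(); r++) {
      for (size_t c = 0; c < nodes; c++) {
        requested += data[r][c];
        if (r == c) hit += data[r][c];
      }
    }
    return requested == 0 ? 0.0 : 100.0 * hit / requested;
  }
};

int main() {
  NodeMatrix m(2);
  m.increase(0, 0);  // requested node 0, allocated on node 0: hit
  m.increase(0, 1);  // requested node 0, allocated on node 1: miss
  m.increase(2, 1);  // "any node" request (row == nodes): never a hit
  std::printf("hit rate: %.0f%%\n", m.hit_rate());  // prints: hit rate: 33%
  return 0;
}
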
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1NUMAStats.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_NODE_TIMES_HPP
+#define SHARE_VM_GC_G1_NODE_TIMES_HPP
+
+#include "memory/allocation.hpp"
+
+// Manages statistics for multiple nodes.
+class G1NUMAStats : public CHeapObj<mtGC> {
+  struct Stat {
+    // Hit count: incremented when the requested id equals the returned id.
+    size_t _hit;
+    // Total request count
+    size_t _requested;
+
+    // Hit count / total request count
+    double rate() const;
+  };
+
+  // Holds a data array whose size is (node count + 1) * (node count), indexed
+  // as requested node * allocated node. The requested node dimension includes
+  // the any-node case.
+  // All operations are NOT thread-safe.
+  // The row index indicates a requested node index while the column index
+  // indicates an allocated node index. The last row is for the any-node request.
+  // E.g. (req, alloc) = (0,0) (1,0) (2,0) (0,1) (Any, 3) (0,2) (0,3) (0,3) (3,3)
+  // Allocated node index      0    1    2    3  Total
+  // Requested node index 0    1    1    1    2    5
+  //                      1    1    0    0    0    1
+  //                      2    1    0    0    0    1
+  //                      3    0    0    0    1    1
+  //                    Any    0    0    0    1    1
+  class NodeDataArray : public CHeapObj<mtGC> {
+    // The number of nodes.
+    uint _num_column;
+    // The number of nodes + 1 (for any node request)
+    uint _num_row;
+    // 2-dimension array that holds count of allocated / requested node index.
+    size_t** _data;
+
+  public:
+    NodeDataArray(uint num_nodes);
+    ~NodeDataArray();
+
+    // Create Stat result of hit count, requested count and hit rate.
+    // The result is copied to the given result parameter.
+    void create_hit_rate(Stat* result) const;
+    // Create Stat result of hit count, requested count and hit rate of the given index.
+    // The result is copied to the given result parameter.
+    void create_hit_rate(Stat* result, uint req_index) const;
+    // Return sum of the given index.
+    size_t sum(uint req_index) const;
+    // Increase at the request / allocated index.
+    void increase(uint req_index, uint alloc_index);
+    // Clear all data.
+    void clear();
+    // Return current value of the given request / allocated index.
+    size_t get(uint req_index, uint alloc_index);
+    // Copy values of the given request index.
+    void copy(uint req_index, size_t* stat);
+  };
+
+public:
+  enum NodeDataItems {
+    // Statistics of a new region allocation.
+    NewRegionAlloc,
+    // Statistics of object processing during copy to survivor region.
+    LocalObjProcessAtCopyToSurv,
+    NodeDataItemsSentinel
+  };
+
+private:
+  const int* _node_ids;
+  uint _num_node_ids;
+
+  NodeDataArray* _node_data[NodeDataItemsSentinel];
+
+  void print_info(G1NUMAStats::NodeDataItems phase);
+
+  void print_mutator_alloc_stat_debug();
+
+public:
+  G1NUMAStats(const int* node_ids, uint num_node_ids);
+  ~G1NUMAStats();
+
+  void clear(G1NUMAStats::NodeDataItems phase);
+
+  // Update the given phase of requested and allocated node index.
+  void update(G1NUMAStats::NodeDataItems phase, uint requested_node_index, uint allocated_node_index);
+
+  // Copy all allocated statistics of the given phase and requested node.
+  // Precondition: allocated_stat should have the same length as the number of active nodes.
+  void copy(G1NUMAStats::NodeDataItems phase, uint requested_node_index, size_t* allocated_stat);
+
+  void print_statistics();
+};
+
+#endif // SHARE_VM_GC_G1_NODE_TIMES_HPP
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -124,6 +124,11 @@
   return _low_boundary + index * _page_size;
 }
 
+size_t G1PageBasedVirtualSpace::page_size() const {
+  assert(_page_size > 0, "Page size is not yet initialized.");
+  return _page_size;
+}
+
 bool G1PageBasedVirtualSpace::is_after_last_page(size_t index) const {
   guarantee(index <= _committed.size(),
             "Given boundary page " SIZE_FORMAT " is beyond managed page count " SIZE_FORMAT, index, _committed.size());
--- a/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1PageBasedVirtualSpace.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -92,8 +92,6 @@
 
   // Returns the index of the page which contains the given address.
   size_t  addr_to_page_index(char* addr) const;
-  // Returns the address of the given page index.
-  char*  page_start(size_t index) const;
 
   // Is the given page index the last page?
   bool is_last_page(size_t index) const { return index == (_committed.size() - 1); }
@@ -147,6 +145,10 @@
 
   void check_for_contiguity() PRODUCT_RETURN;
 
+  // Returns the address of the given page index.
+  char*  page_start(size_t index) const;
+  size_t page_size() const;
+
   // Debugging
   void print_on(outputStream* out) PRODUCT_RETURN;
   void print();
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -57,7 +57,9 @@
     _stack_trim_lower_threshold(GCDrainStackTargetSize),
     _trim_ticks(),
     _old_gen_is_full(false),
-    _num_optional_regions(optional_cset_length)
+    _num_optional_regions(optional_cset_length),
+    _numa(g1h->numa()),
+    _obj_alloc_stat(NULL)
 {
   // We allocate number of young gen regions in the collection set plus one
   // entries, since entry 0 keeps track of surviving bytes for non-young regions.
@@ -79,6 +81,8 @@
   _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
 
   _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
+
+  initialize_numa_stats();
 }
 
 // Pass locally gathered statistics to global state.
@@ -92,6 +96,7 @@
   for (uint i = 0; i < length; i++) {
     surviving_young_words[i] += _surviving_young_words[i];
   }
+  flush_numa_stats();
 }
 
 G1ParScanThreadState::~G1ParScanThreadState() {
@@ -99,6 +104,7 @@
   delete _closures;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
   delete[] _oops_into_optional_regions;
+  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
 }
 
 size_t G1ParScanThreadState::lab_waste_words() const {
@@ -152,11 +158,11 @@
   } while (!_refs->is_empty());
 }
 
-HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr const region_attr,
-                                                      G1HeapRegionAttr* dest,
+HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
                                                       size_t word_sz,
-                                                      bool previous_plab_refill_failed) {
-  assert(region_attr.is_in_cset_or_humongous(), "Unexpected region attr type: %s", region_attr.get_type_str());
+                                                      bool previous_plab_refill_failed,
+                                                      uint node_index) {
+
   assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
 
   // Right now we only have two types of regions (young / old) so
@@ -165,7 +171,8 @@
     bool plab_refill_in_old_failed = false;
     HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
                                                         word_sz,
-                                                        &plab_refill_in_old_failed);
+                                                        &plab_refill_in_old_failed,
+                                                        node_index);
     // Make sure that we won't attempt to copy any other objects out
     // of a survivor region (given that apparently we cannot allocate
     // any new ones) to avoid coming into this slow path again and again.
@@ -204,8 +211,8 @@
 
 void G1ParScanThreadState::report_promotion_event(G1HeapRegionAttr const dest_attr,
                                                   oop const old, size_t word_sz, uint age,
-                                                  HeapWord * const obj_ptr) const {
-  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr);
+                                                  HeapWord * const obj_ptr, uint node_index) const {
+  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_attr, node_index);
   if (alloc_buf->contains(obj_ptr)) {
     _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old,
@@ -228,24 +235,30 @@
   if (_old_gen_is_full && dest_attr.is_old()) {
     return handle_evacuation_failure_par(old, old_mark);
   }
-  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz);
+  HeapRegion* const from_region = _g1h->heap_region_containing(old);
+  uint node_index = from_region->node_index();
+
+  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
 
   // PLAB allocations should succeed most of the time, so we'll
   // normally check against NULL once and that's it.
   if (obj_ptr == NULL) {
     bool plab_refill_failed = false;
-    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed);
+    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
     if (obj_ptr == NULL) {
-      obj_ptr = allocate_in_next_plab(region_attr, &dest_attr, word_sz, plab_refill_failed);
+      assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
+      obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
       if (obj_ptr == NULL) {
         // This will either forward-to-self, or detect that someone else has
         // installed a forwarding pointer.
         return handle_evacuation_failure_par(old, old_mark);
       }
     }
+    update_numa_stats(node_index);
+
     if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
       // The events are checked individually as part of the actual commit
-      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr);
+      report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
     }
   }
 
@@ -257,7 +270,7 @@
   if (_g1h->evacuation_should_fail()) {
     // Doing this after all the allocation attempts also tests the
     // undo_allocation() method too.
-    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
+    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
     return handle_evacuation_failure_par(old, old_mark);
   }
 #endif // !PRODUCT
@@ -270,7 +283,6 @@
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
 
-    HeapRegion* const from_region = _g1h->heap_region_containing(old);
     const uint young_index = from_region->young_index_in_cset();
 
     assert((from_region->is_young() && young_index >  0) ||
@@ -323,7 +335,7 @@
     }
     return obj;
   } else {
-    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz);
+    _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
     return forward_ptr;
   }
 }
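
The changes above thread a node_index from the source region through every PLAB call, so that a survivor copy lands on the same node the object came from. A minimal standalone sketch of per-node PLAB selection (plain C++; Plab and PlabAllocator are illustrative stand-ins, not the HotSpot classes):

#include <cstdio>
#include <vector>

struct Plab {
  size_t used = 0;
  size_t capacity = 1024;  // words, illustrative
  bool allocate(size_t words) {
    if (used + words > capacity) return false;
    used += words;
    return true;
  }
};

struct PlabAllocator {
  // One PLAB per (destination, node); a single destination here for brevity.
  std::vector<Plab> per_node;
  explicit PlabAllocator(unsigned nodes) : per_node(nodes) {}

  bool plab_allocate(size_t words, unsigned node_index) {
    return per_node[node_index].allocate(words);
  }
};

int main() {
  PlabAllocator alloc(2);
  // An object from a region on node 1 is copied into node 1's PLAB.
  unsigned from_region_node = 1;
  bool ok = alloc.plab_allocate(8, from_region_node);
  std::printf("allocated on node 1: %s (used %zu words)\n",
              ok ? "yes" : "no", alloc.per_node[1].used);
  return 0;
}
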
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -95,6 +95,13 @@
   size_t _num_optional_regions;
   G1OopStarChunkedList* _oops_into_optional_regions;
 
+  G1NUMA* _numa;
+
+  // Records how many object allocations happened at each node during copy to survivor.
+  // Recording only happens when gc+heap+numa logging is enabled; the data is
+  // transferred to the global statistics when flushed.
+  size_t* _obj_alloc_stat;
+
 public:
   G1ParScanThreadState(G1CollectedHeap* g1h,
                        G1RedirtyCardsQueueSet* rdcqs,
@@ -187,27 +194,32 @@
   inline void dispatch_reference(StarTask ref);
 
   // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
-  // allocate into dest. State is the original (source) cset state for the object
-  // that is allocated for. Previous_plab_refill_failed indicates whether previously
-  // a PLAB refill into "state" failed.
+  // allocate into dest. Previous_plab_refill_failed indicates whether the previous
+  // PLAB refill for the original (source) object failed.
   // Returns a non-NULL pointer if successful, and updates dest if required.
   // Also determines whether we should continue to try to allocate into the various
   // generations or just end trying to allocate.
-  HeapWord* allocate_in_next_plab(G1HeapRegionAttr const region_attr,
-                                  G1HeapRegionAttr* dest,
+  HeapWord* allocate_in_next_plab(G1HeapRegionAttr* dest,
                                   size_t word_sz,
-                                  bool previous_plab_refill_failed);
+                                  bool previous_plab_refill_failed,
+                                  uint node_index);
 
   inline G1HeapRegionAttr next_region_attr(G1HeapRegionAttr const region_attr, markWord const m, uint& age);
 
   void report_promotion_event(G1HeapRegionAttr const dest_attr,
                               oop const old, size_t word_sz, uint age,
-                              HeapWord * const obj_ptr) const;
+                              HeapWord * const obj_ptr, uint node_index) const;
 
   inline bool needs_partial_trimming() const;
   inline bool is_partially_trimmed() const;
 
   inline void trim_queue_to_threshold(uint threshold);
+
+  // NUMA statistics related methods.
+  inline void initialize_numa_stats();
+  inline void flush_numa_stats();
+  inline void update_numa_stats(uint node_index);
+
 public:
   oop copy_to_survivor_space(G1HeapRegionAttr const region_attr, oop const obj, markWord const old_mark);
 
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -230,4 +230,30 @@
   return &_oops_into_optional_regions[hr->index_in_opt_cset()];
 }
 
+void G1ParScanThreadState::initialize_numa_stats() {
+  if (_numa->is_enabled()) {
+    LogTarget(Info, gc, heap, numa) lt;
+
+    if (lt.is_enabled()) {
+      uint num_nodes = _numa->num_active_nodes();
+      // Only record when both NUMA (multiple active nodes) and logging are enabled.
+      _obj_alloc_stat = NEW_C_HEAP_ARRAY(size_t, num_nodes, mtGC);
+      memset(_obj_alloc_stat, 0, sizeof(size_t) * num_nodes);
+    }
+  }
+}
+
+void G1ParScanThreadState::flush_numa_stats() {
+  if (_obj_alloc_stat != NULL) {
+    uint node_index = _numa->index_of_current_thread();
+    _numa->copy_statistics(G1NUMAStats::LocalObjProcessAtCopyToSurv, node_index, _obj_alloc_stat);
+  }
+}
+
+void G1ParScanThreadState::update_numa_stats(uint node_index) {
+  if (_obj_alloc_stat != NULL) {
+    _obj_alloc_stat[node_index]++;
+  }
+}
+
 #endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
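
The three inline helpers above implement a common parallel-GC pattern: each worker counts into a private array on the hot path and merges into shared statistics once per pause. A minimal standalone sketch of that pattern (plain C++, illustrative names; the real code merges into G1NUMAStats under the LocalObjProcessAtCopyToSurv phase):

#include <cstdio>
#include <vector>

struct SharedStats {
  std::vector<size_t> per_node;
  explicit SharedStats(unsigned nodes) : per_node(nodes, 0) {}
  void merge(const std::vector<size_t>& local) {
    for (size_t i = 0; i < per_node.size(); i++) per_node[i] += local[i];
  }
};

struct WorkerStats {
  std::vector<size_t> local;
  explicit WorkerStats(unsigned nodes) : local(nodes, 0) {}
  void update(unsigned node_index) { local[node_index]++; }  // hot path: no sharing
  void flush(SharedStats& shared) { shared.merge(local); }   // once per pause
};

int main() {
  SharedStats shared(2);
  WorkerStats worker(2);
  worker.update(0);
  worker.update(0);
  worker.update(1);
  worker.flush(shared);
  std::printf("node 0: %zu, node 1: %zu\n", shared.per_node[0], shared.per_node[1]);
  return 0;
}
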
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1BiasedArray.hpp"
+#include "gc/g1/g1NUMA.hpp"
 #include "gc/g1/g1RegionToSpaceMapper.hpp"
 #include "logging/log.hpp"
 #include "memory/allocation.inline.hpp"
@@ -44,7 +45,8 @@
   _listener(NULL),
   _storage(rs, used_size, page_size),
   _region_granularity(region_granularity),
-  _commit_map(rs.size() * commit_factor / region_granularity, mtGC) {
+  _commit_map(rs.size() * commit_factor / region_granularity, mtGC),
+  _memory_type(type) {
   guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
 
@@ -72,10 +74,18 @@
   }
 
   virtual void commit_regions(uint start_idx, size_t num_regions, WorkGang* pretouch_gang) {
-    size_t const start_page = (size_t)start_idx * _pages_per_region;
-    bool zero_filled = _storage.commit(start_page, num_regions * _pages_per_region);
+    const size_t start_page = (size_t)start_idx * _pages_per_region;
+    const size_t size_in_pages = num_regions * _pages_per_region;
+    bool zero_filled = _storage.commit(start_page, size_in_pages);
+    if (_memory_type == mtJavaHeap) {
+      for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++ ) {
+        void* address = _storage.page_start(region_index * _pages_per_region);
+        size_t size_in_bytes = _storage.page_size() * _pages_per_region;
+        G1NUMA::numa()->request_memory_on_node(address, size_in_bytes, region_index);
+      }
+    }
     if (AlwaysPreTouch) {
-      _storage.pretouch(start_page, num_regions * _pages_per_region, pretouch_gang);
+      _storage.pretouch(start_page, size_in_pages, pretouch_gang);
     }
     _commit_map.set_range(start_idx, start_idx + num_regions);
     fire_on_commit(start_idx, num_regions, zero_filled);
@@ -126,26 +136,32 @@
     size_t num_committed = 0;
 
     bool all_zero_filled = true;
+    G1NUMA* numa = G1NUMA::numa();
 
-    for (uint i = start_idx; i < start_idx + num_regions; i++) {
-      assert(!_commit_map.at(i), "Trying to commit storage at region %u that is already committed", i);
-      size_t idx = region_idx_to_page_idx(i);
-      uint old_refcount = _refcounts.get_by_index(idx);
+    for (uint region_idx = start_idx; region_idx < start_idx + num_regions; region_idx++) {
+      assert(!_commit_map.at(region_idx), "Trying to commit storage at region %u that is already committed", region_idx);
+      size_t page_idx = region_idx_to_page_idx(region_idx);
+      uint old_refcount = _refcounts.get_by_index(page_idx);
 
       bool zero_filled = false;
       if (old_refcount == 0) {
         if (first_committed == NoPage) {
-          first_committed = idx;
+          first_committed = page_idx;
           num_committed = 1;
         } else {
           num_committed++;
         }
-        zero_filled = _storage.commit(idx, 1);
+        zero_filled = _storage.commit(page_idx, 1);
+        if (_memory_type == mtJavaHeap) {
+          void* address = _storage.page_start(page_idx);
+          size_t size_in_bytes = _storage.page_size();
+          numa->request_memory_on_node(address, size_in_bytes, region_idx);
+        }
       }
       all_zero_filled &= zero_filled;
 
-      _refcounts.set_by_index(idx, old_refcount + 1);
-      _commit_map.set_bit(i);
+      _refcounts.set_by_index(page_idx, old_refcount + 1);
+      _commit_map.set_bit(region_idx);
     }
     if (AlwaysPreTouch && num_committed > 0) {
       _storage.pretouch(first_committed, num_committed, pretouch_gang);
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -53,6 +53,8 @@
   // Mapping management
   CHeapBitMap _commit_map;
 
+  MemoryType _memory_type;
+
   G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemoryType type);
 
   void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
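
The commit paths above pair each page commit with a placement request for the region's preferred node. A minimal standalone sketch of that loop (plain C++; request_on_node() is a hypothetical stand-in for os::numa_make_local(), and the round-robin assumes pages no larger than regions):

#include <cstdio>

static unsigned preferred_node(unsigned region_index, unsigned num_nodes) {
  return region_index % num_nodes;  // simplified: pages smaller than regions
}

static void request_on_node(char* addr, size_t bytes, unsigned node) {
  // Stand-in: the real code asks the OS to place [addr, addr + bytes) on node.
  std::printf("bind [%p, %p) -> node %u\n", (void*)addr, (void*)(addr + bytes), node);
}

static void commit_regions(char* base, unsigned start_idx, unsigned num_regions,
                           size_t region_bytes, unsigned num_nodes) {
  for (unsigned r = start_idx; r < start_idx + num_regions; r++) {
    char* addr = base + (size_t)r * region_bytes;
    request_on_node(addr, region_bytes, preferred_node(r, num_nodes));
  }
}

int main() {
  static char heap[4 * 4096];
  commit_regions(heap, 0, 4, 4096, 2);  // 4 regions spread over 2 nodes
  return 0;
}
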
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionsOnNodes.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1NUMA.hpp"
+#include "gc/g1/g1RegionsOnNodes.hpp"
+#include "gc/g1/heapRegion.hpp"
+
+G1RegionsOnNodes::G1RegionsOnNodes() : _count_per_node(NULL), _numa(G1NUMA::numa()) {
+  _count_per_node = NEW_C_HEAP_ARRAY(uint, _numa->num_active_nodes(), mtGC);
+  clear();
+}
+
+G1RegionsOnNodes::~G1RegionsOnNodes() {
+  FREE_C_HEAP_ARRAY(uint, _count_per_node);
+}
+
+uint G1RegionsOnNodes::add(HeapRegion* hr) {
+  uint node_index = hr->node_index();
+
+  // Update only if the node index is valid.
+  if (node_index < _numa->num_active_nodes()) {
+    _count_per_node[node_index] += 1;
+    return node_index;
+  }
+
+  return G1NUMA::UnknownNodeIndex;
+}
+
+void G1RegionsOnNodes::clear() {
+  for (uint i = 0; i < _numa->num_active_nodes(); i++) {
+    _count_per_node[i] = 0;
+  }
+}
+
+uint G1RegionsOnNodes::count(uint node_index) const {
+  return _count_per_node[node_index];
+}
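
G1RegionsOnNodes is a simple guarded counter: a region is counted only when its node index is valid. A minimal standalone sketch (plain C++, illustrative names):

#include <cstdio>
#include <vector>

static const unsigned UnknownNodeIndex = ~0u;

struct RegionsOnNodes {
  std::vector<unsigned> count_per_node;
  explicit RegionsOnNodes(unsigned nodes) : count_per_node(nodes, 0) {}

  unsigned add(unsigned region_node_index) {
    if (region_node_index < count_per_node.size()) {
      count_per_node[region_node_index]++;
      return region_node_index;
    }
    return UnknownNodeIndex;  // the region's node is not (yet) known
  }
};

int main() {
  RegionsOnNodes counts(2);
  counts.add(1);
  counts.add(UnknownNodeIndex);  // ignored: invalid node index
  std::printf("node 1 regions: %u\n", counts.count_per_node[1]);  // prints 1
  return 0;
}
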
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1RegionsOnNodes.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1REGIONS_HPP
+#define SHARE_VM_GC_G1_G1REGIONS_HPP
+
+#include "memory/allocation.hpp"
+
+class G1NUMA;
+class HeapRegion;
+
+// Contains a region count for each node index.
+class G1RegionsOnNodes : public StackObj {
+  volatile uint* _count_per_node;
+  G1NUMA*        _numa;
+
+public:
+  G1RegionsOnNodes();
+
+  ~G1RegionsOnNodes();
+
+  // Increases _count_per_node for the node of the given heap region and returns the node index.
+  uint add(HeapRegion* hr);
+
+  void clear();
+
+  uint count(uint node_index) const;
+};
+
+#endif // SHARE_VM_GC_G1_G1REGIONS_HPP
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -30,17 +30,23 @@
 
 G1SurvivorRegions::G1SurvivorRegions() :
   _regions(new (ResourceObj::C_HEAP, mtGC) GrowableArray<HeapRegion*>(8, true, mtGC)),
-  _used_bytes(0) {}
+  _used_bytes(0),
+  _regions_on_node() {}
 
-void G1SurvivorRegions::add(HeapRegion* hr) {
+uint G1SurvivorRegions::add(HeapRegion* hr) {
   assert(hr->is_survivor(), "should be flagged as survivor region");
   _regions->append(hr);
+  return _regions_on_node.add(hr);
 }
 
 uint G1SurvivorRegions::length() const {
   return (uint)_regions->length();
 }
 
+uint G1SurvivorRegions::regions_on_node(uint node_index) const {
+  return _regions_on_node.count(node_index);
+}
+
 void G1SurvivorRegions::convert_to_eden() {
   for (GrowableArrayIterator<HeapRegion*> it = _regions->begin();
        it != _regions->end();
@@ -54,6 +60,7 @@
 void G1SurvivorRegions::clear() {
   _regions->clear();
   _used_bytes = 0;
+  _regions_on_node.clear();
 }
 
 void G1SurvivorRegions::add_used_bytes(size_t used_bytes) {
--- a/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1SurvivorRegions.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_G1_G1SURVIVORREGIONS_HPP
 #define SHARE_GC_G1_G1SURVIVORREGIONS_HPP
 
+#include "gc/g1/g1RegionsOnNodes.hpp"
 #include "runtime/globals.hpp"
 
 template <typename T>
@@ -35,17 +36,19 @@
 private:
   GrowableArray<HeapRegion*>* _regions;
   volatile size_t             _used_bytes;
+  G1RegionsOnNodes            _regions_on_node;
 
 public:
   G1SurvivorRegions();
 
-  void add(HeapRegion* hr);
+  virtual uint add(HeapRegion* hr);
 
   void convert_to_eden();
 
   void clear();
 
   uint length() const;
+  uint regions_on_node(uint node_index) const;
 
   const GrowableArray<HeapRegion*>* regions() const {
     return _regions;
--- a/src/hotspot/share/gc/g1/g1VMOperations.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -40,17 +40,61 @@
   _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, false /* clear_all_soft_refs */);
 }
 
+VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
+                                                   GCCause::Cause gc_cause,
+                                                   double target_pause_time_ms) :
+  VM_GC_Operation(gc_count_before, gc_cause),
+  _target_pause_time_ms(target_pause_time_ms),
+  _transient_failure(false),
+  _cycle_already_in_progress(false),
+  _gc_succeeded(false)
+{}
+
+bool VM_G1TryInitiateConcMark::doit_prologue() {
+  bool result = VM_GC_Operation::doit_prologue();
+  // The prologue can fail for a couple of reasons. The first is that another GC
+  // got scheduled and prevented the scheduling of the initial mark GC. The
+  // second is that the GC locker may be active and the heap can't be expanded.
+  // In both cases we want to retry the GC so that the initial mark pause is
+  // actually scheduled. In the second case, however, we should stall until
+  // the GC locker is no longer active and then retry the initial mark GC.
+  if (!result) _transient_failure = true;
+  return result;
+}
+
+void VM_G1TryInitiateConcMark::doit() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  GCCauseSetter x(g1h, _gc_cause);
+  if (!g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause)) {
+    // Failure to force the next GC pause to be an initial mark indicates
+    // there is already a concurrent marking cycle in progress.  Set flag
+    // to notify the caller and return immediately.
+    _cycle_already_in_progress = true;
+  } else if (!g1h->do_collection_pause_at_safepoint(_target_pause_time_ms)) {
+    // Failure to perform the collection at all occurs because GCLocker is
+    // active, and we have the bad luck to be the collection request that
+    // makes a later _gc_locker collection needed.  (Else we would have hit
+    // the GCLocker check in the prologue.)
+    _transient_failure = true;
+  } else if (g1h->should_upgrade_to_full_gc(_gc_cause)) {
+    // GC ran, but we're still in trouble and need a full GC.
+    log_info(gc, ergo)("Attempting maximally compacting collection");
+    _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */
+                                            true /* clear_all_soft_refs */);
+    guarantee(_gc_succeeded, "Elevated collections during the safepoint must always succeed");
+  } else {
+    _gc_succeeded = true;
+  }
+}
+
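
VM_G1TryInitiateConcMark reports its outcome through flags rather than retrying internally; the caller decides whether to retry (e.g. G1CollectedHeap::try_collect()). A minimal standalone sketch of such a caller-side loop, under the assumption that only transient failures are retried (plain C++, illustrative names):

#include <cstdio>

struct TryInitiateConcMark {
  bool transient_failure = false;
  bool cycle_already_in_progress = false;
  bool gc_succeeded = false;

  void execute(int attempt) {
    // Pretend the first attempt races with the GC locker.
    if (attempt == 0) { transient_failure = true; return; }
    gc_succeeded = true;
  }
};

static bool try_collect() {
  for (int attempt = 0; ; attempt++) {
    TryInitiateConcMark op;
    op.execute(attempt);
    if (op.cycle_already_in_progress) return false;  // another thread marks
    if (!op.transient_failure) return op.gc_succeeded;
    std::printf("attempt %d failed transiently, retrying\n", attempt);
  }
}

int main() {
  std::printf("collect: %s\n", try_collect() ? "ok" : "cycle in progress");
  return 0;
}
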
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t         word_size,
                                                      uint           gc_count_before,
                                                      GCCause::Cause gc_cause,
-                                                     bool           should_initiate_conc_mark,
                                                      double         target_pause_time_ms) :
   VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
   _gc_succeeded(false),
-  _should_initiate_conc_mark(should_initiate_conc_mark),
-  _should_retry_gc(false),
-  _target_pause_time_ms(target_pause_time_ms),
-  _old_marking_cycles_completed_before(0) {
+  _target_pause_time_ms(target_pause_time_ms) {
 
   guarantee(target_pause_time_ms > 0.0,
             "target_pause_time_ms = %1.6lf should be positive",
@@ -58,26 +102,8 @@
   _gc_cause = gc_cause;
 }
 
-bool VM_G1CollectForAllocation::doit_prologue() {
-  bool res = VM_CollectForAllocation::doit_prologue();
-  if (!res) {
-    if (_should_initiate_conc_mark) {
-      // The prologue can fail for a couple of reasons. The first is that another GC
-      // got scheduled and prevented the scheduling of the initial mark GC. The
-      // second is that the GC locker may be active and the heap can't be expanded.
-      // In both cases we want to retry the GC so that the initial mark pause is
-      // actually scheduled. In the second case, however, we should stall until
-      // until the GC locker is no longer active and then retry the initial mark GC.
-      _should_retry_gc = true;
-    }
-  }
-  return res;
-}
-
 void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
-      "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
@@ -92,44 +118,6 @@
   }
 
   GCCauseSetter x(g1h, _gc_cause);
-  if (_should_initiate_conc_mark) {
-    // It's safer to read old_marking_cycles_completed() here, given
-    // that noone else will be updating it concurrently. Since we'll
-    // only need it if we're initiating a marking cycle, no point in
-    // setting it earlier.
-    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
-
-    // At this point we are supposed to start a concurrent cycle. We
-    // will do so if one is not already in progress.
-    bool res = g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause);
-
-    // The above routine returns true if we were able to force the
-    // next GC pause to be an initial mark; it returns false if a
-    // marking cycle is already in progress.
-    //
-    // If a marking cycle is already in progress just return and skip the
-    // pause below - if the reason for requesting this initial mark pause
-    // was due to a System.gc() then the requesting thread should block in
-    // doit_epilogue() until the marking cycle is complete.
-    //
-    // If this initial mark pause was requested as part of a humongous
-    // allocation then we know that the marking cycle must just have
-    // been started by another thread (possibly also allocating a humongous
-    // object) as there was no active marking cycle when the requesting
-    // thread checked before calling collect() in
-    // attempt_allocation_humongous(). Retrying the GC, in this case,
-    // will cause the requesting thread to spin inside collect() until the
-    // just started marking cycle is complete - which may be a while. So
-    // we do NOT retry the GC.
-    if (!res) {
-      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
-      if (_gc_cause != GCCause::_g1_humongous_allocation) {
-        _should_retry_gc = true;
-      }
-      return;
-    }
-  }
-
   // Try a partial collection of some kind.
   _gc_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
 
@@ -138,66 +126,15 @@
       // An allocation had been requested. Do it, eventually trying a stronger
       // kind of GC.
       _result = g1h->satisfy_failed_allocation(_word_size, &_gc_succeeded);
-    } else {
-      bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause);
-
-      if (should_upgrade_to_full) {
-        // There has been a request to perform a GC to free some space. We have no
-        // information on how much memory has been asked for. In case there are
-        // absolutely no regions left to allocate into, do a maximally compacting full GC.
-        log_info(gc, ergo)("Attempting maximally compacting collection");
-        _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */
-                                                   true   /* clear_all_soft_refs */);
-      }
+    } else if (g1h->should_upgrade_to_full_gc(_gc_cause)) {
+      // There has been a request to perform a GC to free some space. We have no
+      // information on how much memory has been asked for. In case there are
+      // absolutely no regions left to allocate into, do a maximally compacting full GC.
+      log_info(gc, ergo)("Attempting maximally compacting collection");
+      _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */
+                                              true   /* clear_all_soft_refs */);
     }
     guarantee(_gc_succeeded, "Elevated collections during the safepoint must always succeed.");
-  } else {
-    assert(_result == NULL, "invariant");
-    // The only reason for the pause to not be successful is that, the GC locker is
-    // active (or has become active since the prologue was executed). In this case
-    // we should retry the pause after waiting for the GC locker to become inactive.
-    _should_retry_gc = true;
-  }
-}
-
-void VM_G1CollectForAllocation::doit_epilogue() {
-  VM_CollectForAllocation::doit_epilogue();
-
-  // If the pause was initiated by a System.gc() and
-  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
-  // that just started (or maybe one that was already in progress) to
-  // finish.
-  if (GCCause::is_user_requested_gc(_gc_cause) &&
-      _should_initiate_conc_mark) {
-    assert(ExplicitGCInvokesConcurrent,
-           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
-
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-    // In the doit() method we saved g1h->old_marking_cycles_completed()
-    // in the _old_marking_cycles_completed_before field. We have to
-    // wait until we observe that g1h->old_marking_cycles_completed()
-    // has increased by at least one. This can happen if a) we started
-    // a cycle and it completes, b) a cycle already in progress
-    // completes, or c) a Full GC happens.
-
-    // If the condition has already been reached, there's no point in
-    // actually taking the lock and doing the wait.
-    if (g1h->old_marking_cycles_completed() <=
-                                          _old_marking_cycles_completed_before) {
-      // The following is largely copied from CMS
-
-      Thread* thr = Thread::current();
-      assert(thr->is_Java_thread(), "invariant");
-      JavaThread* jt = (JavaThread*)thr;
-      ThreadToNativeFromVM native(jt);
-
-      MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-      while (g1h->old_marking_cycles_completed() <=
-                                          _old_marking_cycles_completed_before) {
-        ml.wait();
-      }
-    }
   }
 }
 
--- a/src/hotspot/share/gc/g1/g1VMOperations.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1VMOperations.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -45,29 +45,39 @@
     _gc_succeeded(false) { }
   virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
   virtual void doit();
-  bool gc_succeeded() { return _gc_succeeded; }
+  bool gc_succeeded() const { return _gc_succeeded; }
+};
+
+class VM_G1TryInitiateConcMark : public VM_GC_Operation {
+  double _target_pause_time_ms;
+  bool _transient_failure;
+  bool _cycle_already_in_progress;
+  bool _gc_succeeded;
+
+public:
+  VM_G1TryInitiateConcMark(uint gc_count_before,
+                           GCCause::Cause gc_cause,
+                           double target_pause_time_ms);
+  virtual VMOp_Type type() const { return VMOp_G1TryInitiateConcMark; }
+  virtual bool doit_prologue();
+  virtual void doit();
+  bool transient_failure() const { return _transient_failure; }
+  bool cycle_already_in_progress() const { return _cycle_already_in_progress; }
+  bool gc_succeeded() const { return _gc_succeeded; }
 };
 
 class VM_G1CollectForAllocation : public VM_CollectForAllocation {
   bool _gc_succeeded;
-
-  bool _should_initiate_conc_mark;
-  bool _should_retry_gc;
   double _target_pause_time_ms;
-  uint  _old_marking_cycles_completed_before;
 
 public:
   VM_G1CollectForAllocation(size_t         word_size,
                             uint           gc_count_before,
                             GCCause::Cause gc_cause,
-                            bool           should_initiate_conc_mark,
                             double         target_pause_time_ms);
   virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
-  virtual bool doit_prologue();
   virtual void doit();
-  virtual void doit_epilogue();
-  bool should_retry_gc() const { return _should_retry_gc; }
-  bool gc_succeeded() { return _gc_succeeded; }
+  bool gc_succeeded() const { return _gc_succeeded; }
 };
 
 // Concurrent G1 stop-the-world operations such as remark and cleanup.
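The hunks above split concurrent-mark initiation out of VM_G1CollectForAllocation into the dedicated VM_G1TryInitiateConcMark operation. The old catch-all _should_retry_gc flag becomes two distinct outcomes: transient_failure() (e.g. the GC locker was active, so a retry is worthwhile) and cycle_already_in_progress() (a marking cycle is running, so a retry would only spin). A hedged sketch of the retry loop a caller such as G1CollectedHeap::try_collect() can build from these accessors — the actual caller is not part of these hunks, and the driver below is illustrative only:

bool try_start_conc_mark(GCCause::Cause cause, double target_pause_ms) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  while (true) {
    // gc_count_before lets the VM thread skip the op if another GC ran first.
    VM_G1TryInitiateConcMark op(g1h->total_collections(), cause, target_pause_ms);
    VMThread::execute(&op);
    if (op.cycle_already_in_progress()) {
      return false;               // someone else is marking; do not retry
    }
    if (!op.transient_failure()) {
      return op.gc_succeeded();   // definitive outcome
    }
    // Transient failure (e.g. GCLocker active): loop and resubmit the op.
  }
}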
--- a/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/g1YoungRemSetSamplingThread.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -90,8 +90,7 @@
   if ((os::elapsedTime() - _last_periodic_gc_attempt_s) > (G1PeriodicGCInterval / 1000.0)) {
     log_debug(gc, periodic)("Checking for periodic GC.");
     if (should_start_periodic_gc()) {
-      if (!G1CollectedHeap::heap()->try_collect(GCCause::_g1_periodic_collection,
-                                                    false /* retry_on_vmop_failure */)) {
+      if (!G1CollectedHeap::heap()->try_collect(GCCause::_g1_periodic_collection)) {
         log_debug(gc, periodic)("GC request denied. Skipping.");
       }
     }
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -28,6 +28,7 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1CollectionSet.hpp"
 #include "gc/g1/g1HeapRegionTraceType.hpp"
+#include "gc/g1/g1NUMA.hpp"
 #include "gc/g1/g1OopClosures.inline.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "gc/g1/heapRegionBounds.inline.hpp"
@@ -252,7 +253,8 @@
   _index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1),
   _surv_rate_group(NULL), _age_index(-1),
   _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
-  _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
+  _recorded_rs_length(0), _predicted_elapsed_time_ms(0),
+  _node_index(G1NUMA::UnknownNodeIndex)
 {
   _rem_set = new HeapRegionRemSet(bot, this);
 
@@ -470,8 +472,17 @@
   } else {
     st->print("|  ");
   }
-  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
+  st->print("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
                p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
+  if (UseNUMA) {
+    G1NUMA* numa = G1NUMA::numa();
+    if (node_index() < numa->num_active_nodes()) {
+      st->print("|%d", numa->numa_id(node_index()));
+    } else {
+      st->print("|-");
+    }
+  }
+  st->print_cr("");
 }
 
 class G1VerificationClosure : public BasicOopIterateClosure {
--- a/src/hotspot/share/gc/g1/heapRegion.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegion.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -253,6 +253,8 @@
   // for the collection set.
   double _predicted_elapsed_time_ms;
 
+  uint _node_index;
+
   // Iterate over the references covered by the given MemRegion in a humongous
   // object and apply the given closure to them.
   // Humongous objects are allocated directly in the old-gen. So we need special
@@ -643,6 +645,9 @@
   // the strong code roots list for this region
   void strong_code_roots_do(CodeBlobClosure* blk) const;
 
+  uint node_index() const { return _node_index; }
+  void set_node_index(uint node_index) { _node_index = node_index; }
+
   // Verify that the entries on the strong code root list for this
   // region are live and include at least one pointer into this region.
   void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
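A region's _node_index is a compact index into the active NUMA nodes, not an OS node id. Besides real indices it can hold the sentinels G1NUMA::UnknownNodeIndex (set when regions are uncommitted, as below) and G1NUMA::AnyNodeIndex (used for "don't care" allocation requests). Both sentinels lie above the valid range, which is why every check in this change is a single unsigned comparison. A sketch of that idiom (is_valid_node_index() is a hypothetical helper, not part of the change):

inline bool is_valid_node_index(uint node_index, uint num_active_nodes) {
  // Real indices are dense in [0, num_active_nodes); UnknownNodeIndex and
  // AnyNodeIndex are out-of-range constants, so one compare covers both.
  return node_index < num_active_nodes;
}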
--- a/src/hotspot/share/gc/g1/heapRegionManager.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionManager.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -26,10 +26,12 @@
 #include "gc/g1/g1Arguments.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/g1ConcurrentRefine.hpp"
+#include "gc/g1/g1NUMAStats.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/g1/heterogeneousHeapRegionManager.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
 #include "utilities/bitMap.inline.hpp"
 
@@ -103,6 +105,34 @@
   return _available_map.at(region);
 }
 
+HeapRegion* HeapRegionManager::allocate_free_region(HeapRegionType type, uint requested_node_index) {
+  HeapRegion* hr = NULL;
+  bool from_head = !type.is_young();
+  G1NUMA* numa = G1NUMA::numa();
+
+  if (requested_node_index != G1NUMA::AnyNodeIndex && numa->is_enabled()) {
+    // Try to allocate with requested node index.
+    hr = _free_list.remove_region_with_node_index(from_head, requested_node_index);
+  }
+
+  if (hr == NULL) {
+    // If there's a single active node or we did not get a region from our requested node,
+    // try without requested node index.
+    hr = _free_list.remove_region(from_head);
+  }
+
+  if (hr != NULL) {
+    assert(hr->next() == NULL, "Single region should not have next");
+    assert(is_available(hr->hrm_index()), "Must be committed");
+
+    if (numa->is_enabled() && hr->node_index() < numa->num_active_nodes()) {
+      numa->update_statistics(G1NUMAStats::NewRegionAlloc, requested_node_index, hr->node_index());
+    }
+  }
+
+  return hr;
+}
+
 #ifdef ASSERT
 bool HeapRegionManager::is_free(HeapRegion* hr) const {
   return _free_list.contains(hr);
@@ -139,6 +169,11 @@
   guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
   guarantee(_num_committed >= num_regions, "pre-condition");
 
+  // Reset node index to distinguish uncommitted regions from committed ones.
+  for (uint i = start; i < start + num_regions; i++) {
+    at(i)->set_node_index(G1NUMA::UnknownNodeIndex);
+  }
+
   // Print before uncommitting.
   if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
     for (uint i = start; i < start + num_regions; i++) {
@@ -186,6 +221,7 @@
     MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
 
     hr->initialize(mr);
+    hr->set_node_index(G1NUMA::numa()->index_for_region(hr));
     insert_into_free_list(at(i));
   }
 }
@@ -235,6 +271,35 @@
   return expanded;
 }
 
+uint HeapRegionManager::expand_on_preferred_node(uint preferred_index) {
+  uint expand_candidate = UINT_MAX;
+  for (uint i = 0; i < max_length(); i++) {
+    if (is_available(i)) {
+      // Already in use, continue.
+      continue;
+    }
+    // Always save the candidate so we can expand later on.
+    expand_candidate = i;
+    if (is_on_preferred_index(expand_candidate, preferred_index)) {
+      // We have found a candidate on the preferred node, break.
+      break;
+    }
+  }
+
+  if (expand_candidate == UINT_MAX) {
+    // No regions left, expand failed.
+    return 0;
+  }
+
+  make_regions_available(expand_candidate, 1, NULL);
+  return 1;
+}
+
+bool HeapRegionManager::is_on_preferred_index(uint region_index, uint preferred_node_index) {
+  uint region_node_index = G1NUMA::numa()->preferred_node_index_for_index(region_index);
+  return region_node_index == preferred_node_index;
+}
+
 uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
   uint found = 0;
   size_t length_found = 0;
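expand_on_preferred_node() above keeps the most recently seen free slot as a fallback, so when the preferred node has no uncommitted region left, expansion still succeeds with an arbitrary one instead of failing. The scan's invariant in isolation — a sketch under the assumption of two stand-in helpers (is_free() for !is_available(), node_of() for the G1NUMA preferred-index lookup):

// Hypothetical environment for the sketch:
bool is_free(uint region_index);
uint node_of(uint region_index);

uint pick_expand_candidate(uint num_regions, uint preferred_node) {
  uint candidate = UINT_MAX;            // UINT_MAX means "nothing free at all"
  for (uint i = 0; i < num_regions; i++) {
    if (!is_free(i)) {
      continue;                         // already committed
    }
    candidate = i;                      // remember any free slot as fallback
    if (node_of(i) == preferred_node) {
      break;                            // preferred hit: stop searching early
    }
  }
  return candidate;
}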
--- a/src/hotspot/share/gc/g1/heapRegionManager.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionManager.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -108,6 +108,9 @@
   // sequence could be found, otherwise res_idx contains the start index of this range.
   uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
 
+  // Checks G1NUMA to see if this region is on the preferred node.
+  bool is_on_preferred_index(uint region_index, uint preferred_node_index);
+
 protected:
   G1HeapRegionTable _regions;
   G1RegionToSpaceMapper* _heap_mapper;
@@ -174,15 +177,8 @@
     _free_list.add_ordered(list);
   }
 
-  virtual HeapRegion* allocate_free_region(HeapRegionType type) {
-    HeapRegion* hr = _free_list.remove_region(!type.is_young());
-
-    if (hr != NULL) {
-      assert(hr->next() == NULL, "Single region should not have next");
-      assert(is_available(hr->hrm_index()), "Must be committed");
-    }
-    return hr;
-  }
+  // Allocate a free region with the specified node index. If that fails, allocate from any node.
+  virtual HeapRegion* allocate_free_region(HeapRegionType type, uint requested_node_index);
 
   inline void allocate_free_regions_starting_at(uint first, uint num_regions);
 
@@ -196,6 +192,10 @@
     return _free_list.length();
   }
 
+  uint num_free_regions(uint node_index) const {
+    return _free_list.length(node_index);
+  }
+
   size_t total_free_bytes() const {
     return num_free_regions() * HeapRegion::GrainBytes;
   }
@@ -227,6 +227,9 @@
   // this.
   virtual uint expand_at(uint start, uint num_regions, WorkGang* pretouch_workers);
 
+  // Try to expand on the given node index.
+  virtual uint expand_on_preferred_node(uint node_index);
+
   // Find a contiguous set of empty regions of length num. Returns the start index of
   // that set, or G1_NO_HRM_INDEX.
   virtual uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
--- a/src/hotspot/share/gc/g1/heapRegionSet.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionSet.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1NUMA.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 
@@ -101,6 +102,9 @@
     curr->set_next(NULL);
     curr->set_prev(NULL);
     curr->set_containing_set(NULL);
+
+    decrease_length(curr->node_index());
+
     curr = next;
   }
   clear();
@@ -119,6 +123,10 @@
     return;
   }
 
+  if (_node_info != NULL && from_list->_node_info != NULL) {
+    _node_info->add(from_list->_node_info);
+  }
+
   #ifdef ASSERT
   FreeRegionListIterator iter(from_list);
   while (iter.more_available()) {
@@ -220,6 +228,9 @@
     remove(curr);
 
     count++;
+
+    decrease_length(curr->node_index());
+
     curr = next;
   }
 
@@ -267,6 +278,10 @@
   _head = NULL;
   _tail = NULL;
   _last = NULL;
+
+  if (_node_info != NULL) {
+    _node_info->clear();
+  }
 }
 
 void FreeRegionList::verify_list() {
@@ -303,3 +318,41 @@
   guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next");
   guarantee(length() == count, "%s count mismatch. Expected %u, actual %u.", name(), length(), count);
 }
+
+FreeRegionList::FreeRegionList(const char* name, HeapRegionSetChecker* checker):
+  HeapRegionSetBase(name, checker),
+  _node_info(G1NUMA::numa()->is_enabled() ? new NodeInfo() : NULL) {
+
+  clear();
+}
+
+FreeRegionList::~FreeRegionList() {
+  if (_node_info != NULL) {
+    delete _node_info;
+  }
+}
+
+FreeRegionList::NodeInfo::NodeInfo() : _numa(G1NUMA::numa()), _length_of_node(NULL),
+                                       _num_nodes(_numa->num_active_nodes()) {
+  assert(UseNUMA, "Invariant");
+
+  _length_of_node = NEW_C_HEAP_ARRAY(uint, _num_nodes, mtGC);
+}
+
+FreeRegionList::NodeInfo::~NodeInfo() {
+  FREE_C_HEAP_ARRAY(uint, _length_of_node);
+}
+
+void FreeRegionList::NodeInfo::clear() {
+  for (uint i = 0; i < _num_nodes; ++i) {
+    _length_of_node[i] = 0;
+  }
+}
+
+void FreeRegionList::NodeInfo::add(NodeInfo* info) {
+  for (uint i = 0; i < _num_nodes; ++i) {
+    _length_of_node[i] += info->_length_of_node[i];
+  }
+}
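FreeRegionList now owns an optional NodeInfo that mirrors the list length per NUMA node; it is allocated only when G1NUMA is enabled, so the non-NUMA path pays a single NULL check in increase_length()/decrease_length(). Note that regions whose node index is a sentinel are deliberately not counted (see the bounds checks in the inline file below), so the per-node lengths can sum to less than the total. A hedged consistency check built only on the public accessors (verify_node_lengths() is illustrative, not part of the change):

void verify_node_lengths(const FreeRegionList* list, uint num_active_nodes) {
  uint per_node_sum = 0;
  for (uint i = 0; i < num_active_nodes; i++) {
    per_node_sum += list->length(i);  // yields 0 per node when NUMA is off
  }
  assert(per_node_sum <= list->length(),
         "per-node lengths %u exceed total %u", per_node_sum, list->length());
}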
--- a/src/hotspot/share/gc/g1/heapRegionSet.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionSet.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -136,11 +136,33 @@
 // add / remove one region at a time or concatenate two lists.
 
 class FreeRegionListIterator;
+class G1NUMA;
 
 class FreeRegionList : public HeapRegionSetBase {
   friend class FreeRegionListIterator;
 
 private:
+
+  // This class is only initialized if there are multiple active nodes.
+  class NodeInfo : public CHeapObj<mtGC> {
+    G1NUMA* _numa;
+    uint*   _length_of_node;
+    uint    _num_nodes;
+
+  public:
+    NodeInfo();
+    ~NodeInfo();
+
+    inline void increase_length(uint node_index);
+    inline void decrease_length(uint node_index);
+
+    inline uint length(uint index) const;
+
+    void clear();
+
+    void add(NodeInfo* info);
+  };
+
   HeapRegion* _head;
   HeapRegion* _tail;
 
@@ -148,20 +170,23 @@
   // time. It helps to improve performance when adding several ordered items in a row.
   HeapRegion* _last;
 
+  NodeInfo*   _node_info;
+
   static uint _unrealistically_long_length;
 
   inline HeapRegion* remove_from_head_impl();
   inline HeapRegion* remove_from_tail_impl();
 
+  inline void increase_length(uint node_index);
+  inline void decrease_length(uint node_index);
+
 protected:
   // See the comment for HeapRegionSetBase::clear()
   virtual void clear();
 
 public:
-  FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL):
-    HeapRegionSetBase(name, checker) {
-    clear();
-  }
+  FreeRegionList(const char* name, HeapRegionSetChecker* checker = NULL);
+  ~FreeRegionList();
 
   void verify_list();
 
@@ -181,6 +206,9 @@
   // Removes from head or tail based on the given argument.
   HeapRegion* remove_region(bool from_head);
 
+  HeapRegion* remove_region_with_node_index(bool from_head,
+                                            uint requested_node_index);
+
   // Merge two ordered lists. The result is also ordered. The order is
   // determined by hrm_index.
   void add_ordered(FreeRegionList* from_list);
@@ -196,6 +224,9 @@
   virtual void verify();
 
   uint num_of_regions_in_range(uint start, uint end) const;
+
+  using HeapRegionSetBase::length;
+  uint length(uint node_index) const;
 };
 
 // Iterator class that provides a convenient way to iterate over the
--- a/src/hotspot/share/gc/g1/heapRegionSet.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heapRegionSet.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -25,6 +25,7 @@
 #ifndef SHARE_GC_G1_HEAPREGIONSET_INLINE_HPP
 #define SHARE_GC_G1_HEAPREGIONSET_INLINE_HPP
 
+#include "gc/g1/g1NUMA.hpp"
 #include "gc/g1/heapRegionSet.hpp"
 
 inline void HeapRegionSetBase::add(HeapRegion* hr) {
@@ -94,6 +95,8 @@
     _head = hr;
   }
   _last = hr;
+
+  increase_length(hr->node_index());
 }
 
 inline HeapRegion* FreeRegionList::remove_from_head_impl() {
@@ -144,7 +147,107 @@
 
   // remove() will verify the region and check mt safety.
   remove(hr);
+
+  decrease_length(hr->node_index());
+
   return hr;
 }
 
+inline HeapRegion* FreeRegionList::remove_region_with_node_index(bool from_head,
+                                                                 uint requested_node_index) {
+  assert(UseNUMA, "Invariant");
+
+  const uint max_search_depth = G1NUMA::numa()->max_search_depth();
+  HeapRegion* cur;
+
+  // Find the region to use, searching from _head or _tail as requested.
+  size_t cur_depth = 0;
+  if (from_head) {
+    for (cur = _head;
+         cur != NULL && cur_depth < max_search_depth;
+         cur = cur->next(), ++cur_depth) {
+      if (requested_node_index == cur->node_index()) {
+        break;
+      }
+    }
+  } else {
+    for (cur = _tail;
+         cur != NULL && cur_depth < max_search_depth;
+         cur = cur->prev(), ++cur_depth) {
+      if (requested_node_index == cur->node_index()) {
+        break;
+      }
+    }
+  }
+
+  // Didn't find a region to use.
+  if (cur == NULL || cur_depth >= max_search_depth) {
+    return NULL;
+  }
+
+  // Splice the region out of the list.
+  HeapRegion* prev = cur->prev();
+  HeapRegion* next = cur->next();
+  if (prev == NULL) {
+    _head = next;
+  } else {
+    prev->set_next(next);
+  }
+  if (next == NULL) {
+    _tail = prev;
+  } else {
+    next->set_prev(prev);
+  }
+  cur->set_prev(NULL);
+  cur->set_next(NULL);
+
+  if (_last == cur) {
+    _last = NULL;
+  }
+
+  remove(cur);
+  decrease_length(cur->node_index());
+
+  return cur;
+}
+
+inline void FreeRegionList::NodeInfo::increase_length(uint node_index) {
+  if (node_index < _num_nodes) {
+    _length_of_node[node_index] += 1;
+  }
+}
+
+inline void FreeRegionList::NodeInfo::decrease_length(uint node_index) {
+  if (node_index < _num_nodes) {
+    assert(_length_of_node[node_index] > 0,
+           "Current length %u should be greater than zero for node %u",
+           _length_of_node[node_index], node_index);
+    _length_of_node[node_index] -= 1;
+  }
+}
+
+inline uint FreeRegionList::NodeInfo::length(uint node_index) const {
+  return _length_of_node[node_index];
+}
+
+inline void FreeRegionList::increase_length(uint node_index) {
+  if (_node_info != NULL) {
+    _node_info->increase_length(node_index);
+  }
+}
+
+inline void FreeRegionList::decrease_length(uint node_index) {
+  if (_node_info != NULL) {
+    _node_info->decrease_length(node_index);
+  }
+}
+
+inline uint FreeRegionList::length(uint node_index) const {
+  if (_node_info != NULL) {
+    return _node_info->length(node_index);
+  } else {
+    return 0;
+  }
+}
+
 #endif // SHARE_GC_G1_HEAPREGIONSET_INLINE_HPP
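remove_region_with_node_index() caps its walk at G1NUMA::numa()->max_search_depth(), so a request for a node whose regions sit deep in the list costs O(depth) rather than a full-list scan, and a NULL result tells the caller to fall back. The intended two-step pairing, sketched (take_region() is illustrative; the real caller is HeapRegionManager::allocate_free_region() earlier in this changeset):

HeapRegion* take_region(FreeRegionList* free_list, bool from_head, uint node_index) {
  // First a bounded, locality-seeking walk ...
  HeapRegion* hr = free_list->remove_region_with_node_index(from_head, node_index);
  if (hr == NULL) {
    // ... then any region at all, rather than an unbounded search.
    hr = free_list->remove_region(from_head);
  }
  return hr;
}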
--- a/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,7 +263,7 @@
   return num_regions_found;
 }
 
-HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(HeapRegionType type) {
+HeapRegion* HeterogeneousHeapRegionManager::allocate_free_region(HeapRegionType type, uint node_index) {
 
   // We want to prevent mutators from proceeding when we have borrowed regions from the last collection. This
   // will force a full collection to remedy the situation.
--- a/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/g1/heterogeneousHeapRegionManager.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -119,7 +119,7 @@
   void prepare_for_full_collection_start();
   void prepare_for_full_collection_end();
 
-  virtual HeapRegion* allocate_free_region(HeapRegionType type);
+  virtual HeapRegion* allocate_free_region(HeapRegionType type, uint node_index);
 
   // Return maximum number of regions that heap can expand to.
   uint max_expandable_length() const;
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -3201,46 +3201,6 @@
   }
 }
 
-void
-PSParallelCompact::move_and_update(ParCompactionManager* cm, SpaceId space_id) {
-  const MutableSpace* sp = space(space_id);
-  if (sp->is_empty()) {
-    return;
-  }
-
-  ParallelCompactData& sd = PSParallelCompact::summary_data();
-  ParMarkBitMap* const bitmap = mark_bitmap();
-  HeapWord* const dp_addr = dense_prefix(space_id);
-  HeapWord* beg_addr = sp->bottom();
-  HeapWord* end_addr = sp->top();
-
-  assert(beg_addr <= dp_addr && dp_addr <= end_addr, "bad dense prefix");
-
-  const size_t beg_region = sd.addr_to_region_idx(beg_addr);
-  const size_t dp_region = sd.addr_to_region_idx(dp_addr);
-  if (beg_region < dp_region) {
-    update_and_deadwood_in_dense_prefix(cm, space_id, beg_region, dp_region);
-  }
-
-  // The destination of the first live object that starts in the region is one
-  // past the end of the partial object entering the region (if any).
-  HeapWord* const dest_addr = sd.partial_obj_end(dp_region);
-  HeapWord* const new_top = _space_info[space_id].new_top();
-  assert(new_top >= dest_addr, "bad new_top value");
-  const size_t words = pointer_delta(new_top, dest_addr);
-
-  if (words > 0) {
-    ObjectStartArray* start_array = _space_info[space_id].start_array();
-    MoveAndUpdateClosure closure(bitmap, cm, start_array, dest_addr, words);
-
-    ParMarkBitMap::IterationStatus status;
-    status = bitmap->iterate(&closure, dest_addr, end_addr);
-    assert(status == ParMarkBitMap::full, "iteration not complete");
-    assert(bitmap->find_obj_beg(closure.source(), end_addr) == end_addr,
-           "live objects skipped because closure is full");
-  }
-}
-
 jlong PSParallelCompact::millis_since_last_gc() {
   // We need a monotonically non-decreasing time in ms but
   // os::javaTimeMillis() does not guarantee monotonicity.
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1130,9 +1130,6 @@
   static inline HeapWord*         dense_prefix(SpaceId space_id);
   static inline ObjectStartArray* start_array(SpaceId space_id);
 
-  // Move and update the live objects in the specified space.
-  static void move_and_update(ParCompactionManager* cm, SpaceId space_id);
-
   // Process the end of the given region range in the dense prefix.
   // This includes saving any object not updated.
   static void dense_prefix_regions_epilogue(ParCompactionManager* cm,
--- a/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -102,8 +102,7 @@
   // partially-scanned arrays (in the latter case, we push an oop to
   // the from-space image of the array and the length on the
   // from-space image indicates how many entries on the array we still
-  // need to scan; this is basically how ParNew does partial array
-  // scanning too). To be able to distinguish between reference
+  // need to scan. To be able to distinguish between reference
   // locations and partially-scanned array oops we simply mask the
   // latter oops with 0x01. The next three methods do the masking,
   // unmasking, and checking whether the oop is masked or not. Notice
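The 0x01 masking the comment describes works because oops are at least word-aligned, so the lowest address bit is always zero and free to serve as a tag. A standalone illustration of the scheme (these are not the promotion manager's actual helpers):

#include <stdint.h>

static inline void* mask_chunked_oop(void* p) {
  return (void*)((uintptr_t)p | (uintptr_t)0x1);   // tag: partially-scanned array
}
static inline void* unmask_chunked_oop(void* p) {
  return (void*)((uintptr_t)p & ~(uintptr_t)0x1);  // recover the real pointer
}
static inline bool is_oop_masked(void* p) {
  return ((uintptr_t)p & (uintptr_t)0x1) != 0;     // test the tag bit
}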
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,8 +66,7 @@
 // Methods of protected closure types.
 
 DefNewGeneration::IsAliveClosure::IsAliveClosure(Generation* young_gen) : _young_gen(young_gen) {
-  assert(_young_gen->kind() == Generation::ParNew ||
-         _young_gen->kind() == Generation::DefNew, "Expected the young generation here");
+  assert(_young_gen->kind() == Generation::DefNew, "Expected the young generation here");
 }
 
 bool DefNewGeneration::IsAliveClosure::do_object_b(oop p) {
@@ -884,7 +883,6 @@
       log_trace(gc)("DefNewEpilogue: cause(%s), not full, seen_failed, will_clear_seen_failed",
                             GCCause::to_string(gch->gc_cause()));
       assert(gch->gc_cause() == GCCause::_scavenge_alot ||
-             (GCCause::is_user_requested_gc(gch->gc_cause()) && UseConcMarkSweepGC && ExplicitGCInvokesConcurrent) ||
              !gch->incremental_collection_failed(),
              "Twice in a row");
       seen_incremental_collection_failed = false;
--- a/src/hotspot/share/gc/serial/tenuredGeneration.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/serial/tenuredGeneration.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -39,9 +39,6 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/parOopClosures.hpp"
-#endif
 
 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                      size_t initial_byte_size,
--- a/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/adaptiveSizePolicy.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -39,13 +39,11 @@
 class AdaptiveSizePolicy : public CHeapObj<mtGC> {
  friend class GCAdaptivePolicyCounters;
  friend class PSGCAdaptivePolicyCounters;
- friend class CMSGCAdaptivePolicyCounters;
  protected:
 
   enum GCPolicyKind {
     _gc_adaptive_size_policy,
-    _gc_ps_adaptive_size_policy,
-    _gc_cms_adaptive_size_policy
+    _gc_ps_adaptive_size_policy
   };
   virtual GCPolicyKind kind() const { return _gc_adaptive_size_policy; }
 
@@ -77,7 +75,7 @@
 
   // Last calculated sizes, in bytes, and aligned
   size_t _eden_size;        // calculated eden free space in bytes
-  size_t _promo_size;       // calculated cms gen free space in bytes
+  size_t _promo_size;       // calculated promoted free space in bytes
 
   size_t _survivor_size;    // calculated survivor size in bytes
 
@@ -122,7 +120,7 @@
   // Variables for estimating the major and minor collection costs
   //   minor collection time vs. young gen size
   LinearLeastSquareFit* _minor_collection_estimator;
-  //   major collection time vs. cms gen size
+  //   major collection time vs. old gen size
   LinearLeastSquareFit* _major_collection_estimator;
 
   // These record the most recent collection times.  They
@@ -326,9 +324,6 @@
                      double gc_pause_goal_sec,
                      uint gc_cost_ratio);
 
-  bool is_gc_cms_adaptive_size_policy() {
-    return kind() == _gc_cms_adaptive_size_policy;
-  }
   bool is_gc_ps_adaptive_size_policy() {
     return kind() == _gc_ps_adaptive_size_policy;
   }
--- a/src/hotspot/share/gc/shared/blockOffsetTable.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -352,306 +352,6 @@
 }
 
 //////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace
-//////////////////////////////////////////////////////////////////////
-
-// The block [blk_start, blk_end) has been allocated;
-// adjust the block offset table to represent this information;
-// NOTE: Clients of BlockOffsetArrayNonContigSpace: consider using
-// the somewhat more lightweight split_block() or
-// (when init_to_zero()) mark_block() wherever possible.
-// right-open interval: [blk_start, blk_end)
-void
-BlockOffsetArrayNonContigSpace::alloc_block(HeapWord* blk_start,
-                                            HeapWord* blk_end) {
-  assert(blk_start != NULL && blk_end > blk_start,
-         "phantom block");
-  single_block(blk_start, blk_end);
-  allocated(blk_start, blk_end);
-}
-
-// Adjust BOT to show that a previously whole block has been split
-// into two.  We verify the BOT for the first part (prefix) and
-// update the  BOT for the second part (suffix).
-//      blk is the start of the block
-//      blk_size is the size of the original block
-//      left_blk_size is the size of the first part of the split
-void BlockOffsetArrayNonContigSpace::split_block(HeapWord* blk,
-                                                 size_t blk_size,
-                                                 size_t left_blk_size) {
-  // Verify that the BOT shows [blk, blk + blk_size) to be one block.
-  verify_single_block(blk, blk_size);
-  // Update the BOT to indicate that [blk + left_blk_size, blk + blk_size)
-  // is one single block.
-  assert(blk_size > 0, "Should be positive");
-  assert(left_blk_size > 0, "Should be positive");
-  assert(left_blk_size < blk_size, "Not a split");
-
-  // Start addresses of prefix block and suffix block.
-  HeapWord* pref_addr = blk;
-  HeapWord* suff_addr = blk + left_blk_size;
-  HeapWord* end_addr  = blk + blk_size;
-
-  // Indices for starts of prefix block and suffix block.
-  size_t pref_index = _array->index_for(pref_addr);
-  if (_array->address_for_index(pref_index) != pref_addr) {
-    // pref_addr does not begin pref_index
-    pref_index++;
-  }
-
-  size_t suff_index = _array->index_for(suff_addr);
-  if (_array->address_for_index(suff_index) != suff_addr) {
-    // suff_addr does not begin suff_index
-    suff_index++;
-  }
-
-  // Definition: A block B, denoted [B_start, B_end) __starts__
-  //     a card C, denoted [C_start, C_end), where C_start and C_end
-  //     are the heap addresses that card C covers, iff
-  //     B_start <= C_start < B_end.
-  //
-  //     We say that a card C "is started by" a block B, iff
-  //     B "starts" C.
-  //
-  //     Note that the cardinality of the set of cards {C}
-  //     started by a block B can be 0, 1, or more.
-  //
-  // Below, pref_index and suff_index are, respectively, the
-  // first (least) card indices that the prefix and suffix of
-  // the split start; end_index is one more than the index of
-  // the last (greatest) card that blk starts.
-  size_t end_index  = _array->index_for(end_addr - 1) + 1;
-
-  // Calculate the # cards that the prefix and suffix affect.
-  size_t num_pref_cards = suff_index - pref_index;
-
-  size_t num_suff_cards = end_index  - suff_index;
-  // Change the cards that need changing
-  if (num_suff_cards > 0) {
-    HeapWord* boundary = _array->address_for_index(suff_index);
-    // Set the offset card for suffix block
-    _array->set_offset_array(suff_index, boundary, suff_addr, true /* reducing */);
-    // Change any further cards that need changing in the suffix
-    if (num_pref_cards > 0) {
-      if (num_pref_cards >= num_suff_cards) {
-        // Unilaterally fix all of the suffix cards: closed card
-        // index interval in args below.
-        set_remainder_to_point_to_start_incl(suff_index + 1, end_index - 1, true /* reducing */);
-      } else {
-        // Unilaterally fix the first (num_pref_cards - 1) following
-        // the "offset card" in the suffix block.
-        const size_t right_most_fixed_index = suff_index + num_pref_cards - 1;
-        set_remainder_to_point_to_start_incl(suff_index + 1,
-          right_most_fixed_index, true /* reducing */);
-        // Fix the appropriate cards in the remainder of the
-        // suffix block -- these are the last num_pref_cards
-        // cards in each power block of the "new" range plumbed
-        // from suff_addr.
-        bool more = true;
-        uint i = 1;
-        // Fix the first power block with  back_by > num_pref_cards.
-        while (more && (i < BOTConstants::N_powers)) {
-          size_t back_by = BOTConstants::power_to_cards_back(i);
-          size_t right_index = suff_index + back_by - 1;
-          size_t left_index  = right_index - num_pref_cards + 1;
-          if (right_index >= end_index - 1) { // last iteration
-            right_index = end_index - 1;
-            more = false;
-          }
-          if (left_index <= right_most_fixed_index) {
-                left_index = right_most_fixed_index + 1;
-          }
-          if (back_by > num_pref_cards) {
-            // Fill in the remainder of this "power block", if it
-            // is non-null.
-            if (left_index <= right_index) {
-              _array->set_offset_array(left_index, right_index,
-                                       BOTConstants::N_words + i - 1, true /* reducing */);
-            } else {
-              more = false; // we are done
-              assert((end_index - 1) == right_index, "Must be at the end.");
-            }
-            i++;
-            break;
-          }
-          i++;
-        }
-        // Fix the rest of the power blocks.
-        while (more && (i < BOTConstants::N_powers)) {
-          size_t back_by = BOTConstants::power_to_cards_back(i);
-          size_t right_index = suff_index + back_by - 1;
-          size_t left_index  = right_index - num_pref_cards + 1;
-          if (right_index >= end_index - 1) { // last iteration
-            right_index = end_index - 1;
-            if (left_index > right_index) {
-              break;
-            }
-            more  = false;
-          }
-          assert(left_index <= right_index, "Error");
-          _array->set_offset_array(left_index, right_index, BOTConstants::N_words + i - 1, true /* reducing */);
-          i++;
-        }
-      }
-    } // else no more cards to fix in suffix
-  } // else nothing needs to be done
-  // Verify that we did the right thing
-  verify_single_block(pref_addr, left_blk_size);
-  verify_single_block(suff_addr, blk_size - left_blk_size);
-}
-
-
-// Mark the BOT such that if [blk_start, blk_end) straddles a card
-// boundary, the card following the first such boundary is marked
-// with the appropriate offset.
-// NOTE: this method does _not_ adjust _unallocated_block or
-// any cards subsequent to the first one.
-void
-BlockOffsetArrayNonContigSpace::mark_block(HeapWord* blk_start,
-                                           HeapWord* blk_end, bool reducing) {
-  do_block_internal(blk_start, blk_end, Action_mark, reducing);
-}
-
-HeapWord* BlockOffsetArrayNonContigSpace::block_start_unsafe(
-  const void* addr) const {
-  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
-  assert(_bottom <= addr && addr < _end,
-         "addr must be covered by this Array");
-  // Must read this exactly once because it can be modified by parallel
-  // allocation.
-  HeapWord* ub = _unallocated_block;
-  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
-    assert(ub < _end, "tautology (see above)");
-    return ub;
-  }
-
-  // Otherwise, find the block start using the table.
-  size_t index = _array->index_for(addr);
-  HeapWord* q = _array->address_for_index(index);
-
-  uint offset = _array->offset_array(index);    // Extend u_char to uint.
-  while (offset >= BOTConstants::N_words) {
-    // The excess of the offset from N_words indicates a power of Base
-    // to go back by.
-    size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
-    q -= (BOTConstants::N_words * n_cards_back);
-    assert(q >= _sp->bottom(),
-           "q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
-           p2i(q), p2i(_sp->bottom()));
-    assert(q < _sp->end(),
-           "q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
-           p2i(q), p2i(_sp->end()));
-    index -= n_cards_back;
-    offset = _array->offset_array(index);
-  }
-  assert(offset < BOTConstants::N_words, "offset too large");
-  index--;
-  q -= offset;
-  assert(q >= _sp->bottom(),
-         "q = " PTR_FORMAT " crossed below bottom = " PTR_FORMAT,
-         p2i(q), p2i(_sp->bottom()));
-  assert(q < _sp->end(),
-         "q = " PTR_FORMAT " crossed above end = " PTR_FORMAT,
-         p2i(q), p2i(_sp->end()));
-  HeapWord* n = q;
-
-  while (n <= addr) {
-    debug_only(HeapWord* last = q);   // for debugging
-    q = n;
-    n += _sp->block_size(n);
-    assert(n > q,
-           "Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT ","
-           " while querying blk_start(" PTR_FORMAT ")"
-           " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
-           p2i(n), p2i(last), p2i(addr), p2i(_sp->bottom()), p2i(_sp->end()));
-  }
-  assert(q <= addr,
-         "wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
-         p2i(q), p2i(addr));
-  assert(addr <= n,
-         "wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
-         p2i(addr), p2i(n));
-  return q;
-}
-
-HeapWord* BlockOffsetArrayNonContigSpace::block_start_careful(
-  const void* addr) const {
-  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");
-
-  assert(_bottom <= addr && addr < _end,
-         "addr must be covered by this Array");
-  // Must read this exactly once because it can be modified by parallel
-  // allocation.
-  HeapWord* ub = _unallocated_block;
-  if (BlockOffsetArrayUseUnallocatedBlock && addr >= ub) {
-    assert(ub < _end, "tautology (see above)");
-    return ub;
-  }
-
-  // Otherwise, find the block start using the table, but taking
-  // care (cf block_start_unsafe() above) not to parse any objects/blocks
-  // on the cards themselves.
-  size_t index = _array->index_for(addr);
-  assert(_array->address_for_index(index) == addr,
-         "arg should be start of card");
-
-  HeapWord* q = (HeapWord*)addr;
-  uint offset;
-  do {
-    offset = _array->offset_array(index);
-    if (offset < BOTConstants::N_words) {
-      q -= offset;
-    } else {
-      size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
-      q -= (n_cards_back * BOTConstants::N_words);
-      index -= n_cards_back;
-    }
-  } while (offset >= BOTConstants::N_words);
-  assert(q <= addr, "block start should be to left of arg");
-  return q;
-}
-
-#ifndef PRODUCT
-// Verification & debugging - ensure that the offset table reflects the fact
-// that the block [blk_start, blk_end) or [blk, blk + size) is a
-// single block of storage. NOTE: can't const this because of
-// call to non-const do_block_internal() below.
-void BlockOffsetArrayNonContigSpace::verify_single_block(
-  HeapWord* blk_start, HeapWord* blk_end) {
-  if (VerifyBlockOffsetArray) {
-    do_block_internal(blk_start, blk_end, Action_check);
-  }
-}
-
-void BlockOffsetArrayNonContigSpace::verify_single_block(
-  HeapWord* blk, size_t size) {
-  verify_single_block(blk, blk + size);
-}
-
-// Verify that the given block is before _unallocated_block
-void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
-  HeapWord* blk_start, HeapWord* blk_end) const {
-  if (BlockOffsetArrayUseUnallocatedBlock) {
-    assert(blk_start < blk_end, "Block inconsistency?");
-    assert(blk_end <= _unallocated_block, "_unallocated_block problem");
-  }
-}
-
-void BlockOffsetArrayNonContigSpace::verify_not_unallocated(
-  HeapWord* blk, size_t size) const {
-  verify_not_unallocated(blk, blk + size);
-}
-#endif // PRODUCT
-
-size_t BlockOffsetArrayNonContigSpace::last_active_index() const {
-  if (_unallocated_block == _bottom) {
-    return 0;
-  } else {
-    return _array->index_for(_unallocated_block - 1);
-  }
-}
-
-//////////////////////////////////////////////////////////////////////
 // BlockOffsetArrayContigSpace
 //////////////////////////////////////////////////////////////////////
 
--- a/src/hotspot/share/gc/shared/blockOffsetTable.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -41,7 +41,6 @@
 //
 // BlockOffsetTable (abstract)
 //   - BlockOffsetArray (abstract)
-//     - BlockOffsetArrayNonContigSpace
 //     - BlockOffsetArrayContigSpace
 //
 
@@ -155,12 +154,6 @@
     void* start_ptr = &_offset_array[start];
     // If collector is concurrent, special handling may be needed.
     G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
-#if INCLUDE_CMSGC
-    if (UseConcMarkSweepGC) {
-      memset_with_concurrent_readers(start_ptr, offset, num_cards);
-      return;
-    }
-#endif // INCLUDE_CMSGC
     memset(start_ptr, offset, num_cards);
   }
 
@@ -389,111 +382,6 @@
 
 ////////////////////////////////////////////////////////////////////////////
 // A subtype of BlockOffsetArray that takes advantage of the fact
-// that its underlying space is a NonContiguousSpace, so that some
-// specialized interfaces can be made available for spaces that
-// manipulate the table.
-////////////////////////////////////////////////////////////////////////////
-class BlockOffsetArrayNonContigSpace: public BlockOffsetArray {
-  friend class VMStructs;
- private:
-  // The portion [_unallocated_block, _sp.end()) of the space that
-  // is a single block known not to contain any objects.
-  // NOTE: See BlockOffsetArrayUseUnallocatedBlock flag.
-  HeapWord* _unallocated_block;
-
- public:
-  BlockOffsetArrayNonContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
-    BlockOffsetArray(array, mr, false),
-    _unallocated_block(_bottom) { }
-
-  // Accessor
-  HeapWord* unallocated_block() const {
-    assert(BlockOffsetArrayUseUnallocatedBlock,
-           "_unallocated_block is not being maintained");
-    return _unallocated_block;
-  }
-
-  void set_unallocated_block(HeapWord* block) {
-    assert(BlockOffsetArrayUseUnallocatedBlock,
-           "_unallocated_block is not being maintained");
-    assert(block >= _bottom && block <= _end, "out of range");
-    _unallocated_block = block;
-  }
-
-  // These methods expect to be called with [blk_start, blk_end)
-  // representing a block of memory in the heap.
-  void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
-  void alloc_block(HeapWord* blk, size_t size) {
-    alloc_block(blk, blk + size);
-  }
-
-  // The following methods are useful and optimized for a
-  // non-contiguous space.
-
-  // Given a block [blk_start, blk_start + full_blk_size), and
-  // a left_blk_size < full_blk_size, adjust the BOT to show two
-  // blocks [blk_start, blk_start + left_blk_size) and
-  // [blk_start + left_blk_size, blk_start + full_blk_size).
-  // It is assumed (and verified in the non-product VM) that the
-  // BOT was correct for the original block.
-  void split_block(HeapWord* blk_start, size_t full_blk_size,
-                           size_t left_blk_size);
-
-  // Adjust BOT to show that it has a block in the range
-  // [blk_start, blk_start + size). Only the first card
-  // of BOT is touched. It is assumed (and verified in the
-  // non-product VM) that the remaining cards of the block
-  // are correct.
-  void mark_block(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);
-  void mark_block(HeapWord* blk, size_t size, bool reducing = false) {
-    mark_block(blk, blk + size, reducing);
-  }
-
-  // Adjust _unallocated_block to indicate that a particular
-  // block has been newly allocated or freed. It is assumed (and
-  // verified in the non-product VM) that the BOT is correct for
-  // the given block.
-  void allocated(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false) {
-    // Verify that the BOT shows [blk, blk + blk_size) to be one block.
-    verify_single_block(blk_start, blk_end);
-    if (BlockOffsetArrayUseUnallocatedBlock) {
-      _unallocated_block = MAX2(_unallocated_block, blk_end);
-    }
-  }
-
-  void allocated(HeapWord* blk, size_t size, bool reducing = false) {
-    allocated(blk, blk + size, reducing);
-  }
-
-  void freed(HeapWord* blk_start, HeapWord* blk_end);
-  void freed(HeapWord* blk, size_t size);
-
-  HeapWord* block_start_unsafe(const void* addr) const;
-
-  // Requires "addr" to be the start of a card and returns the
-  // start of the block that contains the given address.
-  HeapWord* block_start_careful(const void* addr) const;
-
-  // Verification & debugging: ensure that the offset table reflects
-  // the fact that the block [blk_start, blk_end) or [blk, blk + size)
-  // is a single block of storage. NOTE: can't const this because of
-  // call to non-const do_block_internal() below.
-  void verify_single_block(HeapWord* blk_start, HeapWord* blk_end)
-    PRODUCT_RETURN;
-  void verify_single_block(HeapWord* blk, size_t size) PRODUCT_RETURN;
-
-  // Verify that the given block is before _unallocated_block
-  void verify_not_unallocated(HeapWord* blk_start, HeapWord* blk_end)
-    const PRODUCT_RETURN;
-  void verify_not_unallocated(HeapWord* blk, size_t size)
-    const PRODUCT_RETURN;
-
-  // Debugging support
-  virtual size_t last_active_index() const;
-};
-
-////////////////////////////////////////////////////////////////////////////
-// A subtype of BlockOffsetArray that takes advantage of the fact
 // that its underlying space is a ContiguousSpace, so that its "active"
 // region can be more efficiently tracked (than for a non-contiguous space).
 ////////////////////////////////////////////////////////////////////////////
--- a/src/hotspot/share/gc/shared/blockOffsetTable.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/blockOffsetTable.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -70,30 +70,4 @@
             ParGCRareEvent_lock->owned_by_self()), "Crack");
 }
 
-//////////////////////////////////////////////////////////////////////////
-// BlockOffsetArrayNonContigSpace inlines
-//////////////////////////////////////////////////////////////////////////
-inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk,
-                                                  size_t size) {
-  freed(blk, blk + size);
-}
-
-inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start,
-                                                  HeapWord* blk_end) {
-  // Verify that the BOT shows [blk_start, blk_end) to be one block.
-  verify_single_block(blk_start, blk_end);
-  // adjust _unallocated_block upward or downward
-  // as appropriate
-  if (BlockOffsetArrayUseUnallocatedBlock) {
-    assert(_unallocated_block <= _end,
-           "Inconsistent value for _unallocated_block");
-    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
-      // CMS-specific note: a block abutting _unallocated_block to
-      // its left is being freed, a new block is being added or
-      // we are resetting following a compaction
-      _unallocated_block = blk_start;
-    }
-  }
-}
-
 #endif // SHARE_GC_SHARED_BLOCKOFFSETTABLE_INLINE_HPP
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -149,10 +149,13 @@
     Node* control = control_dependent ? kit->control() : NULL;
 
     if (in_native) {
-      load = kit->make_load(control, adr, val_type, access.type(), mo);
+      load = kit->make_load(control, adr, val_type, access.type(), mo, dep,
+                            requires_atomic_access, unaligned,
+                            mismatched, unsafe, access.barrier_data());
     } else {
       load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
-                            dep, requires_atomic_access, unaligned, mismatched, unsafe);
+                            dep, requires_atomic_access, unaligned, mismatched, unsafe,
+                            access.barrier_data());
     }
   } else {
     assert(!requires_atomic_access, "not yet supported");
@@ -162,7 +165,8 @@
     MergeMemNode* mm = opt_access.mem();
     PhaseGVN& gvn = opt_access.gvn();
     Node* mem = mm->memory_at(gvn.C->get_alias_index(adr_type));
-    load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo, dep, unaligned, mismatched);
+    load = LoadNode::make(gvn, control, mem, adr, adr_type, val_type, access.type(), mo,
+                          dep, unaligned, mismatched, unsafe, access.barrier_data());
     load = gvn.transform(load);
   }
   access.set_raw_access(load);
@@ -409,28 +413,28 @@
     if (adr->bottom_type()->is_ptr_to_narrowoop()) {
       Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
       Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
-      load_store = kit->gvn().transform(new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo));
+      load_store = new CompareAndExchangeNNode(kit->control(), mem, adr, newval_enc, oldval_enc, adr_type, value_type->make_narrowoop(), mo);
     } else
 #endif
     {
-      load_store = kit->gvn().transform(new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo));
+      load_store = new CompareAndExchangePNode(kit->control(), mem, adr, new_val, expected_val, adr_type, value_type->is_oopptr(), mo);
     }
   } else {
     switch (access.type()) {
       case T_BYTE: {
-        load_store = kit->gvn().transform(new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+        load_store = new CompareAndExchangeBNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
         break;
       }
       case T_SHORT: {
-        load_store = kit->gvn().transform(new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+        load_store = new CompareAndExchangeSNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
         break;
       }
       case T_INT: {
-        load_store = kit->gvn().transform(new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+        load_store = new CompareAndExchangeINode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
         break;
       }
       case T_LONG: {
-        load_store = kit->gvn().transform(new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo));
+        load_store = new CompareAndExchangeLNode(kit->control(), mem, adr, new_val, expected_val, adr_type, mo);
         break;
       }
       default:
@@ -438,6 +442,9 @@
     }
   }
 
+  load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+  load_store = kit->gvn().transform(load_store);
+
   access.set_raw_access(load_store);
   pin_atomic_op(access);
 
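The recurring edit in this file — build the node, attach access.barrier_data(), only then GVN-transform — is ordering-sensitive: PhaseGVN hashes a node when it is transformed, so state that distinguishes two otherwise-identical accesses must be in place first, or value numbering could merge them and drop a barrier. A hedged sketch of the pattern for a single node type (make_cas_with_barrier() is illustrative, not changeset code):

Node* make_cas_with_barrier(GraphKit* kit, Node* mem, Node* adr,
                            Node* new_val, Node* expected_val,
                            MemNode::MemOrd mo, uint8_t barrier_data) {
  LoadStoreNode* cas =
    new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
  cas->set_barrier_data(barrier_data);  // must precede transform so GVN
                                        // hashes the barrier state as well
  return kit->gvn().transform(cas);
}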
@@ -466,50 +473,50 @@
       Node *newval_enc = kit->gvn().transform(new EncodePNode(new_val, new_val->bottom_type()->make_narrowoop()));
       Node *oldval_enc = kit->gvn().transform(new EncodePNode(expected_val, expected_val->bottom_type()->make_narrowoop()));
       if (is_weak_cas) {
-        load_store = kit->gvn().transform(new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
+        load_store = new WeakCompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
       } else {
-        load_store = kit->gvn().transform(new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo));
+        load_store = new CompareAndSwapNNode(kit->control(), mem, adr, newval_enc, oldval_enc, mo);
       }
     } else
 #endif
     {
       if (is_weak_cas) {
-        load_store = kit->gvn().transform(new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
+        load_store = new WeakCompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
       } else {
-        load_store = kit->gvn().transform(new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo));
+        load_store = new CompareAndSwapPNode(kit->control(), mem, adr, new_val, expected_val, mo);
       }
     }
   } else {
     switch(access.type()) {
       case T_BYTE: {
         if (is_weak_cas) {
-          load_store = kit->gvn().transform(new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new WeakCompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
         } else {
-          load_store = kit->gvn().transform(new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new CompareAndSwapBNode(kit->control(), mem, adr, new_val, expected_val, mo);
         }
         break;
       }
       case T_SHORT: {
         if (is_weak_cas) {
-          load_store = kit->gvn().transform(new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new WeakCompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
         } else {
-          load_store = kit->gvn().transform(new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new CompareAndSwapSNode(kit->control(), mem, adr, new_val, expected_val, mo);
         }
         break;
       }
       case T_INT: {
         if (is_weak_cas) {
-          load_store = kit->gvn().transform(new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new WeakCompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
         } else {
-          load_store = kit->gvn().transform(new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new CompareAndSwapINode(kit->control(), mem, adr, new_val, expected_val, mo);
         }
         break;
       }
       case T_LONG: {
         if (is_weak_cas) {
-          load_store = kit->gvn().transform(new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new WeakCompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
         } else {
-          load_store = kit->gvn().transform(new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo));
+          load_store = new CompareAndSwapLNode(kit->control(), mem, adr, new_val, expected_val, mo);
         }
         break;
       }
@@ -518,6 +525,9 @@
     }
   }
 
+  load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+  load_store = kit->gvn().transform(load_store);
+
   access.set_raw_access(load_store);
   pin_atomic_op(access);
 
@@ -539,27 +549,30 @@
     } else
 #endif
     {
-      load_store = kit->gvn().transform(new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr()));
+      load_store = new GetAndSetPNode(kit->control(), mem, adr, new_val, adr_type, value_type->is_oopptr());
     }
   } else  {
     switch (access.type()) {
       case T_BYTE:
-        load_store = kit->gvn().transform(new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type));
+        load_store = new GetAndSetBNode(kit->control(), mem, adr, new_val, adr_type);
         break;
       case T_SHORT:
-        load_store = kit->gvn().transform(new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type));
+        load_store = new GetAndSetSNode(kit->control(), mem, adr, new_val, adr_type);
         break;
       case T_INT:
-        load_store = kit->gvn().transform(new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type));
+        load_store = new GetAndSetINode(kit->control(), mem, adr, new_val, adr_type);
         break;
       case T_LONG:
-        load_store = kit->gvn().transform(new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type));
+        load_store = new GetAndSetLNode(kit->control(), mem, adr, new_val, adr_type);
         break;
       default:
         ShouldNotReachHere();
     }
   }
 
+  load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+  load_store = kit->gvn().transform(load_store);
+
   access.set_raw_access(load_store);
   pin_atomic_op(access);
 
@@ -581,21 +594,24 @@
 
   switch(access.type()) {
     case T_BYTE:
-      load_store = kit->gvn().transform(new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type));
+      load_store = new GetAndAddBNode(kit->control(), mem, adr, new_val, adr_type);
       break;
     case T_SHORT:
-      load_store = kit->gvn().transform(new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type));
+      load_store = new GetAndAddSNode(kit->control(), mem, adr, new_val, adr_type);
       break;
     case T_INT:
-      load_store = kit->gvn().transform(new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type));
+      load_store = new GetAndAddINode(kit->control(), mem, adr, new_val, adr_type);
       break;
     case T_LONG:
-      load_store = kit->gvn().transform(new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type));
+      load_store = new GetAndAddLNode(kit->control(), mem, adr, new_val, adr_type);
       break;
     default:
       ShouldNotReachHere();
   }
 
+  load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
+  load_store = kit->gvn().transform(load_store);
+
   access.set_raw_access(load_store);
   pin_atomic_op(access);
 
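Every atomic accessor in this file now follows the same three-step shape: build the raw LoadStore node, copy the access's barrier data onto it, and only then hand the node to GVN. A distilled sketch of that shared shape (make_load_store() is a hypothetical stand-in for the per-type CompareAndExchange*/CompareAndSwap*/GetAndSet*/GetAndAdd* constructors in the hunks above):

  Node* load_store = make_load_store(kit->control(), mem, adr /*, per-op inputs */);
  // Attach the GC barrier metadata before GVN sees the node, presumably so
  // value numbering operates on the node in its final form instead of
  // mutating an already-transformed (possibly commoned) node afterwards.
  load_store->as_LoadStore()->set_barrier_data(access.barrier_data());
  load_store = kit->gvn().transform(load_store);
  access.set_raw_access(load_store);
  pin_atomic_op(access);
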
--- a/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/c2/barrierSetC2.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -103,6 +103,7 @@
   Node*             _base;
   C2AccessValuePtr& _addr;
   Node*             _raw_access;
+  uint8_t           _barrier_data;
 
   void fixup_decorators();
 
@@ -113,7 +114,8 @@
     _type(type),
     _base(base),
     _addr(addr),
-    _raw_access(NULL)
+    _raw_access(NULL),
+    _barrier_data(0)
   {}
 
   DecoratorSet decorators() const { return _decorators; }
@@ -124,6 +126,9 @@
   bool is_raw() const             { return (_decorators & AS_RAW) != 0; }
   Node* raw_access() const        { return _raw_access; }
 
+  uint8_t barrier_data() const        { return _barrier_data; }
+  void set_barrier_data(uint8_t data) { _barrier_data = data; }
+
   void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
   virtual void set_memory() {} // no-op for normal accesses, but not for atomic accesses.
 
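The new _barrier_data field lets a GC-specific BarrierSetC2 tag an access before delegating to the shared code, which then stamps the tag onto the raw node (see the hunks above). A minimal sketch, assuming a hypothetical collector with its own MyBarrierSetC2 class and MyLoadBarrierStrong constant:

  Node* MyBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access,
                                                Node* new_val, const Type* val_type) const {
    access.set_barrier_data(MyLoadBarrierStrong);  // hypothetical barrier tag
    // The shared accessor copies access.barrier_data() onto the LoadStore node.
    return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
  }

ZGC's real version of exactly this pattern appears later in this changeset, in zBarrierSetC2.cpp.
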
--- a/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/c2/cardTableBarrierSetC2.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -45,7 +45,7 @@
    }
 }
 
-// vanilla/CMS post barrier
+// vanilla post barrier
 // Insert a write-barrier store.  This is to let generational GC work; we have
 // to flag all oop-stores before the next GC point.
 void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
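For reference, the write-barrier store this comment describes reduces to dirtying one byte of the card table per flagged oop-store. A conceptual sketch only, assuming the usual 512-byte cards (CardTable::card_shift == 9) and dirty_card_val() == 0:

  // Conceptual generational post barrier: mark the card covering the
  // stored-to field so the next GC scans it for old-to-young pointers.
  inline void post_barrier_sketch(volatile jbyte* byte_map_base, void* field_addr) {
    const size_t card = (uintptr_t)field_addr >> 9;  // CardTable::card_shift
    byte_map_base[card] = 0;                         // CardTable::dirty_card_val()
  }
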
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -96,7 +96,7 @@
 // to a newly allocated object along the fast-path. We
 // compensate for such elided card-marks as follows:
 // (a) Generational, non-concurrent collectors, such as
-//     GenCollectedHeap(ParNew,DefNew,Tenured) and
+//     GenCollectedHeap(DefNew,Tenured) and
 //     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
 //     need the card-mark if and only if the region is
 //     in the old gen, and do not care if the card-mark
@@ -105,17 +105,7 @@
 //     scavenge. For all these cases, we can do a card mark
 //     at the point at which we do a slow path allocation
 //     in the old gen, i.e. in this call.
-// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
-//     in addition that the card-mark for an old gen allocated
-//     object strictly follow any associated initializing stores.
-//     In these cases, the memRegion remembered below is
-//     used to card-mark the entire region either just before the next
-//     slow-path allocation by this thread or just before the next scavenge or
-//     CMS-associated safepoint, whichever of these events happens first.
-//     (The implicit assumption is that the object has been fully
-//     initialized by this point, a fact that we assert when doing the
-//     card-mark.)
-// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
+// (b) G1CollectedHeap(G1) uses two kinds of write barriers. When a
 //     G1 concurrent marking is in progress an SATB (pre-write-)barrier
 //     is used to remember the pre-value of any store. Initializing
 //     stores will not need this barrier, so we need not worry about
@@ -124,11 +114,8 @@
 //     which simply enqueues a (sequence of) dirty cards which may
 //     optionally be refined by the concurrent update threads. Note
 //     that this barrier need only be applied to a non-young write,
-//     but, like in CMS, because of the presence of concurrent refinement
-//     (much like CMS' precleaning), must strictly follow the oop-store.
-//     Thus, using the same protocol for maintaining the intended
-//     invariants turns out, serendepitously, to be the same for both
-//     G1 and CMS.
+//     but, because of the presence of concurrent refinement,
+//     must strictly follow the oop-store.
 //
 // For any future collector, this code should be reexamined with
 // that specific collector in mind, and the documentation above suitably
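The ordering requirement in (b) can be read as a two-step protocol; a hedged pseudo-sequence, where dirty_card_for() is a hypothetical helper standing in for G1's barrier stub:

  obj->field = value;          // the oop-store itself
  OrderAccess::storestore();   // store must be visible before the card is dirtied,
                               // since concurrent refinement may scan the card at any time
  dirty_card_for(&obj->field); // hypothetical helper: dirty and enqueue the card
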
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -335,9 +335,9 @@
 #ifndef PRODUCT
 void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
   if (CheckMemoryInitialization && ZapUnusedHeapArea) {
-    for (size_t slot = 0; slot < size; slot += 1) {
-      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
-             "Found non badHeapWordValue in pre-allocation check");
+    // Note the mismatch: size is given in HeapWords (32 or 64 bits each), while ju_addr always points to a 32-bit word
+    for (juint* ju_addr = reinterpret_cast<juint*>(addr); ju_addr < reinterpret_cast<juint*>(addr + size); ++ju_addr) {
+      assert(*ju_addr == badHeapWordVal, "Found non badHeapWordValue in pre-allocation check");
     }
   }
 }
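A worked instance of the unit mismatch the new comment calls out, since addr + size is HeapWord* arithmetic while the loop steps in juints:

  // On a 64-bit VM (HeapWordSize == 8, sizeof(juint) == 4):
  //   size == 4 HeapWords  ->  the range addr..addr+size spans 32 bytes
  //                        ->  the loop checks 8 juint slots against badHeapWordVal
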
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -88,7 +88,6 @@
 // CollectedHeap
 //   GenCollectedHeap
 //     SerialHeap
-//     CMSHeap
 //   G1CollectedHeap
 //   ParallelScavengeHeap
 //   ShenandoahHeap
@@ -172,7 +171,6 @@
     None,
     Serial,
     Parallel,
-    CMS,
     G1,
     Epsilon,
     Z,
--- a/src/hotspot/share/gc/shared/gcArguments.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcArguments.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -101,7 +101,7 @@
       "AllocateHeapAt and AllocateOldGenAt cannot be used together.\n");
     status = false;
   }
-  if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseConcMarkSweepGC || UseEpsilonGC || UseZGC)) {
+  if (!FLAG_IS_DEFAULT(AllocateOldGenAt) && (UseSerialGC || UseEpsilonGC || UseZGC)) {
     jio_fprintf(defaultStream::error_stream(),
       "AllocateOldGenAt is not supported for selected GC.\n");
     status = false;
--- a/src/hotspot/share/gc/shared/gcCause.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcCause.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -78,18 +78,6 @@
     case _metadata_GC_clear_soft_refs:
       return "Metadata GC Clear Soft References";
 
-    case _cms_generation_full:
-      return "CMS Generation Full";
-
-    case _cms_initial_mark:
-      return "CMS Initial Mark";
-
-    case _cms_final_remark:
-      return "CMS Final Remark";
-
-    case _cms_concurrent_mark:
-      return "CMS Concurrent Mark";
-
     case _old_generation_expanded_on_last_scavenge:
       return "Old Generation Expanded On Last Scavenge";
 
--- a/src/hotspot/share/gc/shared/gcCause.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcCause.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -65,11 +65,6 @@
     _metadata_GC_threshold,
     _metadata_GC_clear_soft_refs,
 
-    _cms_generation_full,
-    _cms_initial_mark,
-    _cms_final_remark,
-    _cms_concurrent_mark,
-
     _old_generation_expanded_on_last_scavenge,
     _old_generation_too_full_to_scavenge,
     _adaptive_size_policy,
@@ -114,13 +109,12 @@
            cause != GCCause::_old_generation_expanded_on_last_scavenge,
            "This GCCause may be correct but is not expected yet: %s",
            to_string(cause));
-    // _tenured_generation_full or _cms_generation_full for full tenured generations
+    // _tenured_generation_full for full tenured generations
     // _adaptive_size_policy for a full collection after a young GC
     // _allocation_failure is the generic cause of a collection which could result
     // in the collection of the tenured generation if there is not enough space
     // in the tenured generation to support a young GC.
     return (cause == GCCause::_tenured_generation_full ||
-            cause == GCCause::_cms_generation_full ||
             cause == GCCause::_adaptive_size_policy ||
             cause == GCCause::_allocation_failure);
   }
--- a/src/hotspot/share/gc/shared/gcConfig.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcConfig.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,9 +28,6 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/cmsArguments.hpp"
-#endif
 #if INCLUDE_EPSILONGC
 #include "gc/epsilon/epsilonArguments.hpp"
 #endif
@@ -60,7 +57,6 @@
       _flag(flag), _name(name), _arguments(arguments), _hs_err_name(hs_err_name) {}
 };
 
-       CMSGC_ONLY(static CMSArguments      cmsArguments;)
    EPSILONGC_ONLY(static EpsilonArguments  epsilonArguments;)
         G1GC_ONLY(static G1Arguments       g1Arguments;)
   PARALLELGC_ONLY(static ParallelArguments parallelArguments;)
@@ -71,7 +67,6 @@
 // Table of supported GCs, for translating between command
 // line flag, CollectedHeap::Name and GCArguments instance.
 static const SupportedGC SupportedGCs[] = {
-       CMSGC_ONLY_ARG(SupportedGC(UseConcMarkSweepGC, CollectedHeap::CMS,        cmsArguments,        "concurrent mark sweep gc"))
    EPSILONGC_ONLY_ARG(SupportedGC(UseEpsilonGC,       CollectedHeap::Epsilon,    epsilonArguments,    "epsilon gc"))
         G1GC_ONLY_ARG(SupportedGC(UseG1GC,            CollectedHeap::G1,         g1Arguments,         "g1 gc"))
   PARALLELGC_ONLY_ARG(SupportedGC(UseParallelGC,      CollectedHeap::Parallel,   parallelArguments,   "parallel gc"))
@@ -95,7 +90,6 @@
 bool GCConfig::_gc_selected_ergonomically = false;
 
 void GCConfig::fail_if_unsupported_gc_is_selected() {
-  NOT_CMSGC(       FAIL_IF_SELECTED(UseConcMarkSweepGC, true));
   NOT_EPSILONGC(   FAIL_IF_SELECTED(UseEpsilonGC,       true));
   NOT_G1GC(        FAIL_IF_SELECTED(UseG1GC,            true));
   NOT_PARALLELGC(  FAIL_IF_SELECTED(UseParallelGC,      true));
--- a/src/hotspot/share/gc/shared/gcConfiguration.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcConfiguration.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,10 +41,6 @@
     return ParallelScavenge;
   }
 
-  if (UseConcMarkSweepGC) {
-    return ParNew;
-  }
-
   if (UseZGC || UseShenandoahGC) {
     return NA;
   }
@@ -57,10 +53,6 @@
     return G1Old;
   }
 
-  if (UseConcMarkSweepGC) {
-    return ConcurrentMarkSweep;
-  }
-
   if (UseParallelOldGC) {
     return ParallelOld;
   }
--- a/src/hotspot/share/gc/shared/gcName.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcName.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -33,9 +33,7 @@
   PSMarkSweep,
   ParallelScavenge,
   DefNew,
-  ParNew,
   G1New,
-  ConcurrentMarkSweep,
   G1Old,
   G1Full,
   Z,
@@ -53,9 +51,7 @@
       case PSMarkSweep: return "PSMarkSweep";
       case ParallelScavenge: return "ParallelScavenge";
       case DefNew: return "DefNew";
-      case ParNew: return "ParNew";
       case G1New: return "G1New";
-      case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
       case G1Old: return "G1Old";
       case G1Full: return "G1Full";
       case Z: return "Z";
--- a/src/hotspot/share/gc/shared/gcStats.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcStats.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -36,26 +36,7 @@
  public:
   GCStats();
 
-  enum Name {
-    GCStatsKind,
-    CMSGCStatsKind
-  };
-
-  virtual Name kind() {
-    return GCStatsKind;
-  }
-
   AdaptivePaddedNoZeroDevAverage*  avg_promoted() const { return _avg_promoted; }
-
-  // Average in bytes
-  size_t average_promoted_in_bytes() const {
-    return (size_t)_avg_promoted->average();
-  }
-
-  // Padded average in bytes
-  size_t padded_average_promoted_in_bytes() const {
-    return (size_t)_avg_promoted->padded_average();
-  }
 };
 
 #endif // SHARE_GC_SHARED_GCSTATS_HPP
--- a/src/hotspot/share/gc/shared/gcTrace.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcTrace.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -207,14 +207,4 @@
   DefNewTracer() : YoungGCTracer(DefNew) {}
 };
 
-class ParNewTracer : public YoungGCTracer {
- public:
-  ParNewTracer() : YoungGCTracer(ParNew) {}
-};
-
-class CMSTracer : public OldGCTracer {
- public:
-  CMSTracer() : OldGCTracer(ConcurrentMarkSweep) {}
-};
-
 #endif // SHARE_GC_SHARED_GCTRACE_HPP
--- a/src/hotspot/share/gc/shared/gcTraceSend.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcTraceSend.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -173,7 +173,7 @@
   }
 }
 
-// Common to CMS and G1
+// G1
 void OldGCTracer::send_concurrent_mode_failure_event() {
   EventConcurrentModeFailure e;
   if (e.should_commit()) {
--- a/src/hotspot/share/gc/shared/gcVMOperations.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -192,13 +192,6 @@
 
 // Returns true iff concurrent GCs unloads metadata.
 bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
-#if INCLUDE_CMSGC
-  if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
-    MetaspaceGC::set_should_concurrent_collect(true);
-    return true;
-  }
-#endif
-
 #if INCLUDE_G1GC
   if (UseG1GC && ClassUnloadingWithConcurrentMark) {
     G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -238,13 +231,13 @@
   }
 
   if (initiate_concurrent_GC()) {
-    // For CMS and G1 expand since the collection is going to be concurrent.
+    // For G1 expand since the collection is going to be concurrent.
     _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
     if (_result != NULL) {
       return;
     }
 
-    log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
+    log_debug(gc)("G1 full GC for Metaspace");
   }
 
   // Don't clear the soft refs yet.
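A hedged trace of the surviving path above, with G1 and ClassUnloadingWithConcurrentMark enabled:

  //   initiate_concurrent_GC()            -> starts a G1 concurrent cycle, returns true
  //   expand_and_allocate(_size, _mdtype) -> result != NULL: done, the mutator proceeds
  //                                          while the concurrent cycle unloads classes
  //   result == NULL                      -> fall through to the full-GC path below
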
--- a/src/hotspot/share/gc/shared/gc_globals.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/gc_globals.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -27,9 +27,6 @@
 
 #include "runtime/globals_shared.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/cms_globals.hpp"
-#endif
 #if INCLUDE_EPSILONGC
 #include "gc/epsilon/epsilon_globals.hpp"
 #endif
@@ -64,22 +61,6 @@
                  constraint,                                                \
                  writeable)                                                 \
                                                                             \
-  CMSGC_ONLY(GC_CMS_FLAGS(                                                  \
-    develop,                                                                \
-    develop_pd,                                                             \
-    product,                                                                \
-    product_pd,                                                             \
-    diagnostic,                                                             \
-    diagnostic_pd,                                                          \
-    experimental,                                                           \
-    notproduct,                                                             \
-    manageable,                                                             \
-    product_rw,                                                             \
-    lp64_product,                                                           \
-    range,                                                                  \
-    constraint,                                                             \
-    writeable))                                                             \
-                                                                            \
   EPSILONGC_ONLY(GC_EPSILON_FLAGS(                                          \
     develop,                                                                \
     develop_pd,                                                             \
@@ -178,9 +159,6 @@
                                                                             \
   /* gc */                                                                  \
                                                                             \
-  product(bool, UseConcMarkSweepGC, false,                                  \
-          "Use Concurrent Mark-Sweep GC in the old generation")             \
-                                                                            \
   product(bool, UseSerialGC, false,                                         \
           "Use the Serial garbage collector")                               \
                                                                             \
@@ -286,14 +264,6 @@
           "bigger than this")                                               \
           range(1, max_jint/3)                                              \
                                                                             \
-  product(uintx, OldPLABWeight, 50,                                         \
-          "Percentage (0-100) used to weight the current sample when "      \
-          "computing exponentially decaying average for resizing "          \
-          "OldPLABSize")                                                    \
-          range(0, 100)                                                     \
-                                                                            \
-  product(bool, ResizeOldPLAB, true,                                        \
-          "Dynamically resize (old gen) promotion LAB's")                   \
                                                                             \
   product(bool, AlwaysPreTouch, false,                                      \
           "Force all freshly committed pages to be pre-touched")            \
@@ -311,13 +281,6 @@
           "Size of marking stack")                                          \
           constraint(MarkStackSizeConstraintFunc,AfterErgo)                 \
                                                                             \
-  develop(bool, VerifyBlockOffsetArray, false,                              \
-          "Do (expensive) block offset array verification")                 \
-                                                                            \
-  diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false,              \
-          "Maintain _unallocated_block in BlockOffsetArray "                \
-          "(currently applicable only to CMS collector)")                   \
-                                                                            \
   product(intx, RefDiscoveryPolicy, 0,                                      \
           "Select type of reference discovery policy: "                     \
           "reference-based(0) or referent-based(1)")                        \
@@ -364,9 +327,8 @@
           "collection")                                                     \
                                                                             \
   develop(uintx, PromotionFailureALotCount, 1000,                           \
-          "Number of promotion failures occurring at PLAB "                 \
-          "refill attempts (ParNew) or promotion attempts "                 \
-          "(other young collectors)")                                       \
+          "Number of promotion failures occurring at PLAB promotion "       \
+          "attempts at young collectors")                                   \
                                                                             \
   develop(uintx, PromotionFailureALotInterval, 5,                           \
           "Total collections between promotion failures a lot")             \
@@ -759,8 +721,7 @@
           constraint(YoungPLABSizeConstraintFunc,AfterMemoryInit)           \
                                                                             \
   product(size_t, OldPLABSize, 1024,                                        \
-          "Size of old gen promotion LAB's (in HeapWords), or Number "      \
-          "of blocks to attempt to claim when refilling CMS LAB's")         \
+          "Size of old gen promotion LAB's (in HeapWords)")                 \
           constraint(OldPLABSizeConstraintFunc,AfterMemoryInit)             \
                                                                             \
   product(uintx, TLABAllocationWeight, 35,                                  \
@@ -827,7 +788,6 @@
           "Percentage (0-100) of the old gen allowed as dead wood. "        \
           "Serial mark sweep treats this as both the minimum and maximum "  \
           "value. "                                                         \
-          "CMS uses this value only if it falls back to mark sweep. "       \
           "Par compact uses a variable scale based on the density of the "  \
           "generation and treats this as the maximum value when the heap "  \
           "is either completely full or completely empty.  Par compact "    \
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -418,35 +418,6 @@
   }
 }
 
-#ifndef PRODUCT
-// Override of memory state checking method in CollectedHeap:
-// Some collectors (CMS for example) can't have badHeapWordVal written
-// in the first two words of an object. (For instance , in the case of
-// CMS these words hold state used to synchronize between certain
-// (concurrent) GC steps and direct allocating mutators.)
-// The skip_header_HeapWords() method below, allows us to skip
-// over the requisite number of HeapWord's. Note that (for
-// generational collectors) this means that those many words are
-// skipped in each object, irrespective of the generation in which
-// that object lives. The resultant loss of precision seems to be
-// harmless and the pain of avoiding that imprecision appears somewhat
-// higher than we are prepared to pay for such rudimentary debugging
-// support.
-void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
-                                                         size_t size) {
-  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
-    // We are asked to check a size in HeapWords,
-    // but the memory is mangled in juint words.
-    juint* start = (juint*) (addr + skip_header_HeapWords());
-    juint* end   = (juint*) (addr + size);
-    for (juint* slot = start; slot < end; slot += 1) {
-      assert(*slot == badHeapWordVal,
-             "Found non badHeapWordValue in pre-allocation check");
-    }
-  }
-}
-#endif
-
 HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                                bool is_tlab,
                                                bool first_only) {
@@ -1229,8 +1200,7 @@
 GenCollectedHeap* GenCollectedHeap::heap() {
   CollectedHeap* heap = Universe::heap();
   assert(heap != NULL, "Uninitialized access to GenCollectedHeap::heap()");
-  assert(heap->kind() == CollectedHeap::Serial ||
-         heap->kind() == CollectedHeap::CMS, "Invalid name");
+  assert(heap->kind() == CollectedHeap::Serial, "Invalid name");
   return (GenCollectedHeap*) heap;
 }
 
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -45,8 +45,6 @@
   friend class Generation;
   friend class DefNewGeneration;
   friend class TenuredGeneration;
-  friend class ConcurrentMarkSweepGeneration;
-  friend class CMSCollector;
   friend class GenMarkSweep;
   friend class VM_GenCollectForAllocation;
   friend class VM_GenCollectFull;
@@ -386,11 +384,6 @@
                      CLDClosure* weak_cld_closure,
                      CodeBlobToOopClosure* code_roots);
 
-  // Accessor for memory state verification support
-  NOT_PRODUCT(
-    virtual size_t skip_header_HeapWords() { return 0; }
-  )
-
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
@@ -465,10 +458,6 @@
                               bool is_tlab,
                               bool* gc_overhead_limit_was_exceeded);
 
-  // Override
-  void check_for_non_bad_heap_word_value(HeapWord* addr,
-    size_t size) PRODUCT_RETURN;
-
 #if INCLUDE_SERIALGC
   // For use by mark-sweep.  As implemented, mark-sweep-compact is global
   // in an essential way: compaction is performed across generations, by
--- a/src/hotspot/share/gc/shared/generation.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/generation.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,6 @@
   return gch->old_gen_spec()->init_size();
 }
 
-// This is for CMS. It returns stable monotonic used space size.
-// Remove this when CMS is removed.
-size_t Generation::used_stable() const {
-  return used();
-}
-
 size_t Generation::max_capacity() const {
   return reserved().byte_size();
 }
--- a/src/hotspot/share/gc/shared/generation.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/generation.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -41,20 +41,13 @@
 //
 // Generation                      - abstract base class
 // - DefNewGeneration              - allocation area (copy collected)
-//   - ParNewGeneration            - a DefNewGeneration that is collected by
-//                                   several threads
 // - CardGeneration                 - abstract class adding offset array behavior
 //   - TenuredGeneration             - tenured (old object) space (markSweepCompact)
-//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
-//                                       (Detlefs-Printezis refinement of
-//                                       Boehm-Demers-Schenker)
 //
-// The system configurations currently allowed are:
+// The system configuration currently allowed is:
 //
 //   DefNewGeneration + TenuredGeneration
 //
-//   ParNewGeneration + ConcurrentMarkSweepGeneration
-//
 
 class DefNewGeneration;
 class GCMemoryManager;
@@ -122,9 +115,7 @@
   // The set of possible generation kinds.
   enum Name {
     DefNew,
-    ParNew,
     MarkSweepCompact,
-    ConcurrentMarkSweep,
     Other
   };
 
@@ -156,7 +147,6 @@
   virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                         // generation can currently hold.
   virtual size_t used() const = 0;      // The number of used bytes in the gen.
-  virtual size_t used_stable() const;   // The number of used bytes for memory monitoring tools.
   virtual size_t free() const = 0;      // The number of free bytes in the gen.
 
   // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
--- a/src/hotspot/share/gc/shared/generationSpec.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/generationSpec.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,10 +29,6 @@
 #include "memory/filemap.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/cms/parNewGeneration.hpp"
-#endif
 #if INCLUDE_SERIALGC
 #include "gc/serial/defNewGeneration.hpp"
 #include "gc/serial/tenuredGeneration.hpp"
@@ -48,15 +44,6 @@
       return new TenuredGeneration(rs, _init_size, _min_size, _max_size, remset);
 #endif
 
-#if INCLUDE_CMSGC
-    case Generation::ParNew:
-      return new ParNewGeneration(rs, _init_size, _min_size, _max_size);
-
-    case Generation::ConcurrentMarkSweep: {
-      return new ConcurrentMarkSweepGeneration(rs, _init_size, _min_size, _max_size, remset);
-    }
-#endif // INCLUDE_CMSGC
-
     default:
       guarantee(false, "unrecognized GenerationName");
       return NULL;
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -36,9 +36,6 @@
 #include "runtime/thread.inline.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/jvmFlagConstraintsCMS.hpp"
-#endif
 #if INCLUDE_G1GC
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
 #endif
@@ -65,22 +62,14 @@
   }
 #endif
 
-#if INCLUDE_CMSGC
-  status = ParallelGCThreadsConstraintFuncCMS(value, verbose);
-  if (status != JVMFlag::SUCCESS) {
-    return status;
-  }
-#endif
-
   return status;
 }
 
 // As ConcGCThreads should be smaller than ParallelGCThreads,
 // we need constraint function.
 JVMFlag::Error ConcGCThreadsConstraintFunc(uint value, bool verbose) {
-  // CMS and G1 GCs use ConcGCThreads.
-  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
-       GCConfig::is_gc_selected(CollectedHeap::G1)) && (value > ParallelGCThreads)) {
+  // G1 GC uses ConcGCThreads.
+  if (GCConfig::is_gc_selected(CollectedHeap::G1) && (value > ParallelGCThreads)) {
     JVMFlag::printError(verbose,
                         "ConcGCThreads (" UINT32_FORMAT ") must be "
                         "less than or equal to ParallelGCThreads (" UINT32_FORMAT ")\n",
@@ -92,9 +81,8 @@
 }
 
 static JVMFlag::Error MinPLABSizeBounds(const char* name, size_t value, bool verbose) {
-  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
-       GCConfig::is_gc_selected(CollectedHeap::G1)  ||
-       GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value < PLAB::min_size())) {
+  if ((GCConfig::is_gc_selected(CollectedHeap::G1) || GCConfig::is_gc_selected(CollectedHeap::Parallel)) &&
+      (value < PLAB::min_size())) {
     JVMFlag::printError(verbose,
                         "%s (" SIZE_FORMAT ") must be "
                         "greater than or equal to ergonomic PLAB minimum size (" SIZE_FORMAT ")\n",
@@ -106,8 +94,7 @@
 }
 
 JVMFlag::Error MaxPLABSizeBounds(const char* name, size_t value, bool verbose) {
-  if ((GCConfig::is_gc_selected(CollectedHeap::CMS) ||
-       GCConfig::is_gc_selected(CollectedHeap::G1)  ||
+  if ((GCConfig::is_gc_selected(CollectedHeap::G1) ||
        GCConfig::is_gc_selected(CollectedHeap::Parallel)) && (value > PLAB::max_size())) {
     JVMFlag::printError(verbose,
                         "%s (" SIZE_FORMAT ") must be "
@@ -135,11 +122,6 @@
 JVMFlag::Error OldPLABSizeConstraintFunc(size_t value, bool verbose) {
   JVMFlag::Error status = JVMFlag::SUCCESS;
 
-#if INCLUDE_CMSGC
-  if (UseConcMarkSweepGC) {
-    return OldPLABSizeConstraintFuncCMS(value, verbose);
-  } else
-#endif
   {
     status = MinMaxPLABSizeBounds("OldPLABSize", value, verbose);
   }
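For illustration, a hedged trace of what the ConcGCThreads constraint above rejects (the message follows the printError format string shown; the return value is presumably JVMFlag::VIOLATES_CONSTRAINT, which is not visible in this hunk):

  // -XX:+UseG1GC -XX:ParallelGCThreads=4 -XX:ConcGCThreads=8
  //   is_gc_selected(CollectedHeap::G1) -> true, value (8) > ParallelGCThreads (4)
  //   => "ConcGCThreads (8) must be less than or equal to ParallelGCThreads (4)"
  //   => constraint violation, VM startup fails
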
--- a/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/jvmFlagConstraintsGC.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -28,9 +28,6 @@
 #include "runtime/flags/jvmFlag.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/jvmFlagConstraintsCMS.hpp"
-#endif
 #if INCLUDE_G1GC
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
 #endif
--- a/src/hotspot/share/gc/shared/preservedMarks.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/preservedMarks.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -128,7 +128,7 @@
 
   // Iterate over all stacks, restore all preserved marks, and reclaim
   // the memory taken up by the stack segments.
-  // Supported executors: SharedRestorePreservedMarksTaskExecutor (Serial, CMS, G1),
+  // Supported executors: SharedRestorePreservedMarksTaskExecutor (Serial, G1),
   // PSRestorePreservedMarksTaskExecutor (PS).
   inline void restore(RestorePreservedMarksTaskExecutor* executor);
 
--- a/src/hotspot/share/gc/shared/referenceProcessor.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/referenceProcessor.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -333,7 +333,7 @@
     log_develop_trace(gc, ref)("Enqueue %s reference (" INTPTR_FORMAT ": %s)",
                                reason, p2i(iter.obj()), iter.obj()->klass()->internal_name());
   }
-  assert(oopDesc::is_oop(iter.obj(), UseConcMarkSweepGC), "Adding a bad reference");
+  assert(oopDesc::is_oop(iter.obj()), "Adding a bad reference");
 }
 
 size_t ReferenceProcessor::process_soft_ref_reconsider_work(DiscoveredList&    refs_list,
@@ -1154,7 +1154,7 @@
       // Check assumption that an object is not potentially
       // discovered twice except by concurrent collectors that potentially
       // trace the same Reference object twice.
-      assert(UseConcMarkSweepGC || UseG1GC || UseShenandoahGC,
+      assert(UseG1GC || UseShenandoahGC,
              "Only possible with a concurrent marking collector");
       return true;
     }
--- a/src/hotspot/share/gc/shared/referenceProcessor.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/referenceProcessor.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -217,8 +217,7 @@
   // For collectors that do not keep GC liveness information
   // in the object header, this field holds a closure that
   // helps the reference processor determine the reachability
-  // of an oop. It is currently initialized to NULL for all
-  // collectors except for CMS and G1.
+  // of an oop.
   BoolObjectClosure* _is_alive_non_header;
 
   // Soft ref clearing policies
--- a/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/vmStructs_gc.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -35,9 +35,6 @@
 #include "gc/shared/generationSpec.hpp"
 #include "gc/shared/oopStorage.hpp"
 #include "gc/shared/space.hpp"
-#if INCLUDE_CMSGC
-#include "gc/cms/vmStructs_cms.hpp"
-#endif
 #if INCLUDE_EPSILONGC
 #include "gc/epsilon/vmStructs_epsilon.hpp"
 #endif
@@ -62,9 +59,6 @@
                       volatile_nonstatic_field,                                                                                      \
                       static_field,                                                                                                  \
                       unchecked_nonstatic_field)                                                                                     \
-  CMSGC_ONLY(VM_STRUCTS_CMSGC(nonstatic_field,                                                                                       \
-                              volatile_nonstatic_field,                                                                              \
-                              static_field))                                                                                         \
   EPSILONGC_ONLY(VM_STRUCTS_EPSILONGC(nonstatic_field,                                                                               \
                                       volatile_nonstatic_field,                                                                      \
                                       static_field))                                                                                 \
@@ -107,8 +101,6 @@
   nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_threshold,                        HeapWord*)                             \
   nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_index,                            size_t)                                \
                                                                                                                                      \
-  nonstatic_field(BlockOffsetArrayNonContigSpace, _unallocated_block,                         HeapWord*)                             \
-                                                                                                                                     \
   nonstatic_field(CardGeneration,              _rs,                                           CardTableRS*)                          \
   nonstatic_field(CardGeneration,              _bts,                                          BlockOffsetSharedArray*)               \
   nonstatic_field(CardGeneration,              _shrink_factor,                                size_t)                                \
@@ -168,9 +160,6 @@
 #define VM_TYPES_GC(declare_type,                                         \
                     declare_toplevel_type,                                \
                     declare_integer_type)                                 \
-  CMSGC_ONLY(VM_TYPES_CMSGC(declare_type,                                 \
-                            declare_toplevel_type,                        \
-                            declare_integer_type))                        \
   EPSILONGC_ONLY(VM_TYPES_EPSILONGC(declare_type,                         \
                                     declare_toplevel_type,                \
                                     declare_integer_type))                \
@@ -213,7 +202,6 @@
   declare_toplevel_type(BlockOffsetTable)                                 \
            declare_type(BlockOffsetArray,             BlockOffsetTable)   \
            declare_type(BlockOffsetArrayContigSpace,  BlockOffsetArray)   \
-           declare_type(BlockOffsetArrayNonContigSpace, BlockOffsetArray) \
                                                                           \
   /* Miscellaneous other GC types */                                      \
                                                                           \
@@ -252,8 +240,6 @@
 
 #define VM_INT_CONSTANTS_GC(declare_constant,                               \
                             declare_constant_with_value)                    \
-  CMSGC_ONLY(VM_INT_CONSTANTS_CMSGC(declare_constant,                       \
-                                    declare_constant_with_value))           \
   EPSILONGC_ONLY(VM_INT_CONSTANTS_EPSILONGC(declare_constant,               \
                                             declare_constant_with_value))   \
   G1GC_ONLY(VM_INT_CONSTANTS_G1GC(declare_constant,                         \
@@ -297,7 +283,6 @@
                                                                             \
   declare_constant(CollectedHeap::Serial)                                   \
   declare_constant(CollectedHeap::Parallel)                                 \
-  declare_constant(CollectedHeap::CMS)                                      \
   declare_constant(CollectedHeap::G1)                                       \
                                                                             \
   /* constants from Generation::Name enum */                                \
--- a/src/hotspot/share/gc/shared/workerPolicy.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shared/workerPolicy.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -59,14 +59,11 @@
   // Return number of GC threads to use in the next GC.
   // This is called sparingly so as not to change the
   // number of GC workers gratuitously.
-  //   For ParNew collections
   //   For PS scavenge and ParOld collections
   //   For G1 evacuation pauses (subject to update)
   //   For G1 Full GCs (subject to update)
   // Other collection phases inherit the number of
-  // GC workers from the calls above.  For example,
-  // a CMS parallel remark uses the same number of GC
-  // workers as the most recent ParNew collection.
+  // GC workers from the calls above.
   static uint calc_active_workers(uintx total_workers,
                                   uintx active_workers,
                                   uintx application_workers);
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -24,8 +24,8 @@
 #include "precompiled.hpp"
 #include "c1/c1_IR.hpp"
 #include "gc/shared/satbMarkQueue.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
-#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 #include "gc/shenandoah/shenandoahRuntime.hpp"
@@ -212,12 +212,11 @@
 
   LIRGenerator* gen = access.gen();
   DecoratorSet decorators = access.decorators();
+  BasicType type = access.type();
 
   // 2: load a reference from src location and apply LRB if ShenandoahLoadRefBarrier is set
-  if (ShenandoahLoadRefBarrier) {
-    // Native barrier is for concurrent root processing
-    bool in_native = (decorators & IN_NATIVE) != 0;
-    if (in_native && ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
+  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
+    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
       BarrierSetC1::load_at_resolved(access, result);
       LIR_OprList* args = new LIR_OprList();
       LIR_Opr addr = access.resolved_addr();
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -23,7 +23,7 @@
 
 #include "precompiled.hpp"
 #include "gc/shared/barrierSet.hpp"
-#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
+#include "gc/shenandoah/shenandoahBarrierSet.hpp"
 #include "gc/shenandoah/shenandoahForwarding.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeuristics.hpp"
@@ -542,13 +542,13 @@
 
   Node* load = BarrierSetC2::load_at_resolved(access, val_type);
   DecoratorSet decorators = access.decorators();
-  bool in_native = (decorators & IN_NATIVE) != 0;
+  BasicType type = access.type();
 
-  // 2: apply LRB if ShenandoahLoadRefBarrier is set
-  if (ShenandoahLoadRefBarrier) {
-    // Native barrier is for concurrent root processing
-    bool use_native_barrier = in_native && ShenandoahConcurrentRoots::can_do_concurrent_roots();
-    load = new ShenandoahLoadReferenceBarrierNode(NULL, load, use_native_barrier);
+  // 2: apply LRB if needed
+  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
+    load = new ShenandoahLoadReferenceBarrierNode(NULL,
+                                                  load,
+                                                  ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type));
     if (access.is_parse_access()) {
       load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
     } else {
@@ -556,8 +556,8 @@
     }
   }
 
-  // 3: apply keep-alive barrier if ShenandoahKeepAliveBarrier is set
-  if (ShenandoahKeepAliveBarrier) {
+  // 3: apply keep-alive barrier if needed
+  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
     Node* top = Compile::current()->top();
     Node* adr = access.addr().node();
     Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
@@ -583,6 +583,7 @@
     GraphKit* kit = parse_access.kit();
     bool mismatched = (decorators & C2_MISMATCHED) != 0;
     bool is_unordered = (decorators & MO_UNORDERED) != 0;
+    bool in_native = (decorators & IN_NATIVE) != 0;
     bool need_cpu_mem_bar = !is_unordered || mismatched || in_native;
 
     if (on_weak_ref) {
--- a/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahArguments.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -71,7 +71,7 @@
   // compromise here.
   bool ergo_conc = FLAG_IS_DEFAULT(ConcGCThreads);
   if (ergo_conc) {
-    FLAG_SET_DEFAULT(ConcGCThreads, MAX2(1, os::processor_count() / 4));
+    FLAG_SET_DEFAULT(ConcGCThreads, MAX2(1, os::initial_active_processor_count() / 4));
   }
 
   if (ConcGCThreads == 0) {
@@ -85,7 +85,7 @@
   // the number of concurrent threads.
   bool ergo_parallel = FLAG_IS_DEFAULT(ParallelGCThreads);
   if (ergo_parallel) {
-    FLAG_SET_DEFAULT(ParallelGCThreads, MAX2(1, os::processor_count() / 2));
+    FLAG_SET_DEFAULT(ParallelGCThreads, MAX2(1, os::initial_active_processor_count() / 2));
   }
 
   if (ParallelGCThreads == 0) {
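Worked defaults under the new ergonomics, assuming os::initial_active_processor_count() returns 16:

  //   ConcGCThreads     = MAX2(1, 16 / 4) = 4
  //   ParallelGCThreads = MAX2(1, 16 / 2) = 8
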
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -27,6 +27,7 @@
 #include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
 #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeuristics.hpp"
 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
@@ -71,6 +72,35 @@
   return true;
 }
 
+bool ShenandoahBarrierSet::need_load_reference_barrier(DecoratorSet decorators, BasicType type) {
+  if (!ShenandoahLoadRefBarrier) return false;
+  // Only needed for references
+  return is_reference_type(type);
+}
+
+bool ShenandoahBarrierSet::use_load_reference_barrier_native(DecoratorSet decorators, BasicType type) {
+  assert(need_load_reference_barrier(decorators, type), "Should be subset of LRB");
+  assert(is_reference_type(type), "Why we here?");
+  // Native load reference barrier is only needed for concurrent root processing
+  if (!ShenandoahConcurrentRoots::can_do_concurrent_roots()) {
+    return false;
+  }
+
+  return (decorators & IN_NATIVE) != 0;
+}
+
+bool ShenandoahBarrierSet::need_keep_alive_barrier(DecoratorSet decorators, BasicType type) {
+  if (!ShenandoahKeepAliveBarrier) return false;
+  // Only needed for references
+  if (!is_reference_type(type)) return false;
+
+  bool keep_alive = (decorators & AS_NO_KEEPALIVE) == 0;
+  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
+  bool is_traversal_mode = ShenandoahHeap::heap()->is_traversal_mode();
+  bool on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0;
+  return (on_weak_ref || unknown) && (keep_alive || is_traversal_mode);
+}
+
 template <class T>
 inline void ShenandoahBarrierSet::inline_write_ref_field_pre(T* field, oop new_val) {
   shenandoah_assert_not_in_cset_loc_except(field, _heap->cancelled_gc());
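A worked trace of need_keep_alive_barrier() for a Reference.get()-style load (decorators = ON_WEAK_OOP_REF, type = T_OBJECT), assuming ShenandoahKeepAliveBarrier is on and the heap is not in traversal mode:

  //   keep_alive  = (decorators & AS_NO_KEEPALIVE) == 0                        -> true
  //   unknown     = (decorators & ON_UNKNOWN_OOP_REF) != 0                     -> false
  //   on_weak_ref = (decorators & (ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF)) != 0 -> true
  //   result      = (on_weak_ref || unknown) && (keep_alive || is_traversal_mode) -> true

Adding AS_NO_KEEPALIVE to the same load flips keep_alive to false and the result to false, which is the GC-internal "peek without marking" case.
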
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -57,6 +57,10 @@
     return barrier_set()->_satb_mark_queue_set;
   }
 
+  static bool need_load_reference_barrier(DecoratorSet decorators, BasicType type);
+  static bool use_load_reference_barrier_native(DecoratorSet decorators, BasicType type);
+  static bool need_keep_alive_barrier(DecoratorSet decorators, BasicType type);
+
   void print_on(outputStream* st) const;
 
   bool is_a(BarrierSet::Name bsn);
--- a/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -178,47 +178,36 @@
   return size;
 }
 
-static bool barrier_needed(C2Access& access) {
-  return ZBarrierSet::barrier_needed(access.decorators(), access.type());
+static void set_barrier_data(C2Access& access) {
+  if (ZBarrierSet::barrier_needed(access.decorators(), access.type())) {
+    if (access.decorators() & ON_WEAK_OOP_REF) {
+      access.set_barrier_data(ZLoadBarrierWeak);
+    } else {
+      access.set_barrier_data(ZLoadBarrierStrong);
+    }
+  }
 }
 
 Node* ZBarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
-  Node* result = BarrierSetC2::load_at_resolved(access, val_type);
-  if (barrier_needed(access) && access.raw_access()->is_Mem()) {
-    if ((access.decorators() & ON_WEAK_OOP_REF) != 0) {
-      access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierWeak);
-    } else {
-      access.raw_access()->as_Load()->set_barrier_data(ZLoadBarrierStrong);
-    }
-  }
-
-  return result;
+  set_barrier_data(access);
+  return BarrierSetC2::load_at_resolved(access, val_type);
 }
 
 Node* ZBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                     Node* new_val, const Type* val_type) const {
-  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
-  if (barrier_needed(access)) {
-    access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
-  }
-  return result;
+  set_barrier_data(access);
+  return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, val_type);
 }
 
 Node* ZBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                      Node* new_val, const Type* value_type) const {
-  Node* result = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
-  if (barrier_needed(access)) {
-    access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
-  }
-  return result;
+  set_barrier_data(access);
+  return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
 }
 
 Node* ZBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const {
-  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
-  if (barrier_needed(access)) {
-    access.raw_access()->as_LoadStore()->set_barrier_data(ZLoadBarrierStrong);
-  }
-  return result;
+  set_barrier_data(access);
+  return BarrierSetC2::atomic_xchg_at_resolved(access, new_val, val_type);
 }
 
 bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type,
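How the two halves of this changeset meet, as a hedged sketch: ZBarrierSetC2 now tags the C2Access up front and delegates, and the shared atomic accessors (the barrierSetC2.cpp hunks at the top of this changeset) copy that tag onto the raw node before GVN:

  //   access.set_barrier_data(ZLoadBarrierStrong);   // here, in zBarrierSetC2.cpp
  //   ...
  //   load_store->as_LoadStore()->set_barrier_data(access.barrier_data());  // shared code
  //   load_store = kit->gvn().transform(load_store);

This replaces the previous pattern of resolving the access first and then reaching into access.raw_access() to tag the already-transformed node.
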
--- a/src/hotspot/share/gc/z/zBarrier.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,8 @@
   static const bool Publish     = true;
   static const bool Overflow    = false;
 
+  static void self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr);
+
   template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop barrier(volatile oop* p, oop o);
   template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static oop weak_barrier(volatile oop* p, oop o);
   template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path> static void root_barrier(oop* p, oop o);
@@ -49,8 +51,6 @@
   static bool is_good_or_null_fast_path(uintptr_t addr);
   static bool is_weak_good_or_null_fast_path(uintptr_t addr);
 
-  static bool is_resurrection_blocked(volatile oop* p, oop* o);
-
   static bool during_mark();
   static bool during_relocate();
   template <bool finalizable> static bool should_mark_through(uintptr_t addr);
--- a/src/hotspot/share/gc/z/zBarrier.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zBarrier.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,46 @@
 #include "oops/oop.hpp"
 #include "runtime/atomic.hpp"
 
+inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
+  if (heal_addr == 0) {
+    // Never heal with null since it interacts badly with reference processing.
+    // A mutator clearing an oop would be similar to calling Reference.clear(),
+    // which would make the reference non-discoverable or cause it to
+    // be silently dropped by the reference processor.
+    return;
+  }
+
+  for (;;) {
+    if (addr == heal_addr) {
+      // Already healed
+      return;
+    }
+
+    // Heal
+    const uintptr_t prev_addr = Atomic::cmpxchg(heal_addr, (volatile uintptr_t*)p, addr);
+    if (prev_addr == addr) {
+      // Success
+      return;
+    }
+
+    if (ZAddress::is_good_or_null(prev_addr)) {
+      // No need to heal
+      return;
+    }
+
+    // The oop location was healed by another barrier, but it is still not
+    // good or null. Re-apply healing to make sure the oop is not left with
+    // weaker (remapped or finalizable) metadata bits than what this barrier
+    // tried to apply.
+    assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
+    addr = prev_addr;
+  }
+}
+
 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
 inline oop ZBarrier::barrier(volatile oop* p, oop o) {
   uintptr_t addr = ZOop::to_address(o);
 
-retry:
   // Fast path
   if (fast_path(addr)) {
     return ZOop::from_address(addr);
@@ -45,17 +80,8 @@
   // Slow path
   const uintptr_t good_addr = slow_path(addr);
 
-  // Self heal, but only if the address was actually updated by the slow path,
-  // which might not be the case, e.g. when marking through an already good oop.
-  if (p != NULL && good_addr != addr) {
-    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
-    if (prev_addr != addr) {
-      // Some other thread overwrote the oop. If this oop was updated by a
-      // weak barrier the new oop might not be good, in which case we need
-      // to re-apply this barrier.
-      addr = prev_addr;
-      goto retry;
-    }
+  if (p != NULL) {
+    self_heal(p, addr, good_addr);
   }
 
   return ZOop::from_address(good_addr);
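
A minimal standalone sketch of the healing loop above, using std::atomic in
place of HotSpot's Atomic::cmpxchg. The names and the low-bit "good" encoding
are illustrative assumptions, not ZGC's real address metadata scheme.

#include <atomic>
#include <cassert>
#include <cstdint>

// The low bit models the "good" color; real ZGC uses dedicated metadata bits.
static bool is_good_or_null(uintptr_t addr) {
  return addr == 0 || (addr & 1) != 0;
}

static void self_heal(std::atomic<uintptr_t>* p, uintptr_t addr, uintptr_t heal_addr) {
  if (heal_addr == 0) {
    return;  // Never heal with null (the reference processing rule above)
  }
  while (addr != heal_addr) {
    // On failure, compare_exchange_strong loads the current value into
    // addr, mirroring prev_addr = Atomic::cmpxchg(...).
    if (p->compare_exchange_strong(addr, heal_addr)) {
      return;  // Healed
    }
    if (is_good_or_null(addr)) {
      return;  // Another barrier healed it with bits at least as strong
    }
    // Otherwise retry, re-applying healing over the weaker value.
  }
}

int main() {
  std::atomic<uintptr_t> slot(0x1000);  // Stale: low bit clear
  self_heal(&slot, 0x1000, 0x1001);     // Heal to the good alias
  assert(slot.load() == 0x1001);
  return 0;
}
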
@@ -73,28 +99,12 @@
   }
 
   // Slow path
-  uintptr_t good_addr = slow_path(addr);
+  const uintptr_t good_addr = slow_path(addr);
 
-  // Self heal unless the address returned from the slow path is null,
-  // in which case resurrection was blocked and we must let the reference
-  // processor clear the oop. Mutators are not allowed to clear oops in
-  // these cases, since that would be similar to calling Reference.clear(),
-  // which would make the reference non-discoverable or silently dropped
-  // by the reference processor.
-  if (p != NULL && good_addr != 0) {
-    // The slow path returns a good/marked address, but we never mark oops
-    // in a weak load barrier so we always self heal with the remapped address.
-    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
-    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
-    if (prev_addr != addr) {
-      // Some other thread overwrote the oop. The new
-      // oop is guaranteed to be weak good or null.
-      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");
-
-      // Return the good address instead of the weak good address
-      // to ensure that the currently active heap view is used.
-      good_addr = ZAddress::good_or_null(prev_addr);
-    }
+  if (p != NULL) {
+    // The slow path returns a good/marked address or null, but we never mark
+    // oops in a weak load barrier, so we always heal with the remapped address.
+    self_heal(p, addr, ZAddress::remapped_or_null(good_addr));
   }
 
   return ZOop::from_address(good_addr);
@@ -134,25 +144,6 @@
   return ZAddress::is_weak_good_or_null(addr);
 }
 
-inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
-  const bool is_blocked = ZResurrection::is_blocked();
-
-  // Reload oop after checking the resurrection blocked state. This is
-  // done to prevent a race where we first load an oop, which is logically
-  // null but not yet cleared, then this oop is cleared by the reference
-  // processor and resurrection is unblocked. At this point the mutator
-  // would see the unblocked state and pass this invalid oop through the
-  // normal barrier path, which would incorrectly try to mark this oop.
-  if (p != NULL) {
-    // First assign to reloaded_o to avoid compiler warning about
-    // implicit dereference of volatile oop.
-    const oop reloaded_o = *p;
-    *o = reloaded_o;
-  }
-
-  return is_blocked;
-}
-
 //
 // Load barrier
 //
@@ -190,16 +181,16 @@
 inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
   verify_on_weak(p);
 
-  if (is_resurrection_blocked(p, &o)) {
-    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
+  if (ZResurrection::is_blocked()) {
+    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
   }
 
   return load_barrier_on_oop_field_preloaded(p, o);
 }
 
 inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
-  if (is_resurrection_blocked(p, &o)) {
-    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
+  if (ZResurrection::is_blocked()) {
+    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
   }
 
   return load_barrier_on_oop_field_preloaded(p, o);
@@ -235,8 +226,8 @@
 inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
   verify_on_weak(p);
 
-  if (is_resurrection_blocked(p, &o)) {
-    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
+  if (ZResurrection::is_blocked()) {
+    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
   }
 
   return weak_load_barrier_on_oop_field_preloaded(p, o);
@@ -252,8 +243,8 @@
 }
 
 inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
-  if (is_resurrection_blocked(p, &o)) {
-    return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
+  if (ZResurrection::is_blocked()) {
+    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
   }
 
   return weak_load_barrier_on_oop_field_preloaded(p, o);
--- a/src/hotspot/share/gc/z/zHeap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zHeap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -40,6 +40,7 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "runtime/handshake.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.hpp"
 #include "utilities/debug.hpp"
@@ -315,7 +316,7 @@
   // Process weak roots
   _weak_roots_processor.process_weak_roots();
 
-  // Prepare to unload unused classes and code
+  // Prepare to unload stale metadata and nmethods
   _unload.prepare();
 
   return true;
@@ -325,6 +326,11 @@
   _reference_processor.set_soft_reference_policy(clear);
 }
 
+class ZRendezvousClosure : public ThreadClosure {
+public:
+  virtual void do_thread(Thread* thread) {}
+};
+
 void ZHeap::process_non_strong_references() {
   // Process Soft/Weak/Final/PhantomReferences
   _reference_processor.process_references();
@@ -332,8 +338,22 @@
   // Process concurrent weak roots
   _weak_roots_processor.process_concurrent_weak_roots();
 
-  // Unload unused classes and code
-  _unload.unload();
+  // Unlink stale metadata and nmethods
+  _unload.unlink();
+
+  // Perform a handshake. This is needed 1) to make sure that stale
+  // metadata and nmethods are no longer observable, and 2) to prevent
+  // the race where a mutator first loads an oop that is logically
+  // null but not yet cleared, after which the oop is cleared by the
+  // reference processor and resurrection is unblocked. At that point
+  // the mutator could see the unblocked state and pass the invalid
+  // oop through the normal barrier path, which would incorrectly
+  // try to mark it.
+  ZRendezvousClosure cl;
+  Handshake::execute(&cl);
+
+  // Purge stale metadata and nmethods that were unlinked
+  _unload.purge();
 
   // Unblock resurrection of weak/phantom references
   ZResurrection::unblock();
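
The empty closure gives the handshake its effect: once every JavaThread has
executed it, no mutator can still be inside a code path entered before the
rendezvous. A rough standalone analogue of that grace-period property, using
per-thread epoch counters instead of HotSpot's Handshake machinery (real
handshakes also cover blocked threads, which this sketch omits):

#include <atomic>
#include <cstdint>
#include <thread>
#include <vector>

std::atomic<uint64_t> global_epoch{0};

struct MutatorState {
  std::atomic<uint64_t> seen_epoch{0};
};

// Mutators call this at safe places, outside any barrier or unlink path.
void handshake_point(MutatorState& self) {
  self.seen_epoch.store(global_epoch.load(std::memory_order_acquire),
                        std::memory_order_release);
}

// Stand-in for Handshake::execute(&empty_closure): returns only after every
// registered thread has crossed a handshake point in the new epoch.
void rendezvous(const std::vector<MutatorState*>& mutators) {
  const uint64_t epoch = global_epoch.fetch_add(1) + 1;
  for (MutatorState* m : mutators) {
    while (m->seen_epoch.load(std::memory_order_acquire) < epoch) {
      std::this_thread::yield();
    }
  }
  // From here on, no mutator still holds state read before the epoch bump,
  // so metadata and nmethods unlinked earlier are safe to purge.
}
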
@@ -405,7 +425,7 @@
 void ZHeap::relocate_start() {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
-  // Finish unloading of classes and code
+  // Finish unloading stale metadata and nmethods
   _unload.finish();
 
   // Flip address view
--- a/src/hotspot/share/gc/z/zLiveMap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -54,7 +54,9 @@
 
   // Multiple threads can enter here; make sure only one of them
   // resets the marking information while the others busy wait.
-  for (uint32_t seqnum = _seqnum; seqnum != ZGlobalSeqNum; seqnum = _seqnum) {
+  for (uint32_t seqnum = OrderAccess::load_acquire(&_seqnum);
+       seqnum != ZGlobalSeqNum;
+       seqnum = OrderAccess::load_acquire(&_seqnum)) {
     if ((seqnum != seqnum_initializing) &&
         (Atomic::cmpxchg(seqnum_initializing, &_seqnum, seqnum) == seqnum)) {
       // Reset marking information
@@ -65,13 +67,13 @@
       segment_live_bits().clear();
       segment_claim_bits().clear();
 
-      // Make sure the newly reset marking information is
-      // globally visible before updating the page seqnum.
-      OrderAccess::storestore();
+      assert(_seqnum == seqnum_initializing, "Invalid");
 
-      // Update seqnum
-      assert(_seqnum == seqnum_initializing, "Invalid");
-      _seqnum = ZGlobalSeqNum;
+      // Make sure the newly reset marking information is ordered
+      // before the update of the page seqnum, such that when the
+      // up-to-date seqnum is load-acquired, the bitmaps will not
+      // contain stale information.
+      OrderAccess::release_store(&_seqnum, ZGlobalSeqNum);
       break;
     }
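
The loop above is a reset-once idiom: racing threads elect one initializer via
cmpxchg, and the release/acquire pair on the seqnum orders the bitmap reset
before its publication. A compilable sketch with std::atomic stand-ins; the
names and the single int standing in for the bitmaps are illustrative:

#include <atomic>
#include <cstdint>

constexpr uint32_t seqnum_initializing = ~0u;
uint32_t global_seqnum = 1;            // Bumped once per marking cycle
std::atomic<uint32_t> seqnum{0};       // Per-page sequence number
int marking_data = 0;                  // Stands in for the live/claim bitmaps

void reset_if_needed() {
  for (uint32_t s = seqnum.load(std::memory_order_acquire);
       s != global_seqnum;
       s = seqnum.load(std::memory_order_acquire)) {
    if (s != seqnum_initializing &&
        seqnum.compare_exchange_strong(s, seqnum_initializing)) {
      marking_data = 0;                // Reset marking information
      // The release store orders the reset before the seqnum update, so a
      // thread that load-acquires the new seqnum cannot see stale bitmaps.
      seqnum.store(global_seqnum, std::memory_order_release);
      break;
    }
    // Another thread won the cmpxchg: busy-wait on the acquiring reload.
  }
}
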
 
@@ -93,10 +95,6 @@
   if (!claim_segment(segment)) {
     // Already claimed, wait for live bit to be set
     while (!is_segment_live(segment)) {
-      // Busy wait. The loadload barrier is needed to make
-      // sure we re-read the live bit every time we loop.
-      OrderAccess::loadload();
-
       // Mark reset contention
       if (!contention) {
         // Count contention once
@@ -122,7 +120,7 @@
   }
 
   // Set live bit
-  const bool success = set_segment_live_atomic(segment);
+  const bool success = set_segment_live(segment);
   assert(success, "Should never fail");
 }
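
The explicit memory orders introduced above carry the synchronization that the
removed standalone fences provided: the claimer publishes an initialized
segment with a release store of the live bit, and waiters re-read it with
acquiring loads, which is what makes the old loadload barrier in the busy-wait
loop unnecessary. A hedged standalone sketch of that claim-or-wait shape:

#include <atomic>
#include <thread>

std::atomic<bool> claimed{false};  // Stands in for a segment claim bit
std::atomic<bool> live{false};     // Stands in for a segment live bit

void ensure_segment_initialized() {
  bool expected = false;
  if (claimed.compare_exchange_strong(expected, true,
                                      std::memory_order_acq_rel)) {
    // We won the claim: initialize the segment, then publish it.
    live.store(true, std::memory_order_release);
  } else {
    // Claimed by another thread: the acquiring load replaces the explicit
    // loadload fence that the old busy-wait loop needed.
    while (!live.load(std::memory_order_acquire)) {
      std::this_thread::yield();
    }
  }
}
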
 
--- a/src/hotspot/share/gc/z/zLiveMap.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -55,7 +55,7 @@
   BitMap::idx_t segment_end(BitMap::idx_t segment) const;
 
   bool is_segment_live(BitMap::idx_t segment) const;
-  bool set_segment_live_atomic(BitMap::idx_t segment);
+  bool set_segment_live(BitMap::idx_t segment);
 
   BitMap::idx_t first_live_segment() const;
   BitMap::idx_t next_live_segment(BitMap::idx_t segment) const;
@@ -80,9 +80,9 @@
   size_t live_bytes() const;
 
   bool get(size_t index) const;
-  bool set_atomic(size_t index, bool finalizable, bool& inc_live);
+  bool set(size_t index, bool finalizable, bool& inc_live);
 
-  void inc_live_atomic(uint32_t objects, size_t bytes);
+  void inc_live(uint32_t objects, size_t bytes);
 
   void iterate(ObjectClosure* cl, uintptr_t page_start, size_t page_object_alignment_shift);
 };
--- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -30,6 +30,7 @@
 #include "gc/z/zOop.inline.hpp"
 #include "gc/z/zUtils.inline.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/debug.hpp"
 
@@ -38,7 +39,7 @@
 }
 
 inline bool ZLiveMap::is_marked() const {
-  return _seqnum == ZGlobalSeqNum;
+  return OrderAccess::load_acquire(&_seqnum) == ZGlobalSeqNum;
 }
 
 inline uint32_t ZLiveMap::live_objects() const {
@@ -68,15 +69,15 @@
 }
 
 inline bool ZLiveMap::is_segment_live(BitMap::idx_t segment) const {
-  return segment_live_bits().at(segment);
+  return segment_live_bits().par_at(segment);
 }
 
-inline bool ZLiveMap::set_segment_live_atomic(BitMap::idx_t segment) {
-  return segment_live_bits().par_set_bit(segment);
+inline bool ZLiveMap::set_segment_live(BitMap::idx_t segment) {
+  return segment_live_bits().par_set_bit(segment, memory_order_release);
 }
 
 inline bool ZLiveMap::claim_segment(BitMap::idx_t segment) {
-  return segment_claim_bits().par_set_bit(segment);
+  return segment_claim_bits().par_set_bit(segment, memory_order_acq_rel);
 }
 
 inline BitMap::idx_t ZLiveMap::first_live_segment() const {
@@ -102,7 +103,7 @@
          _bitmap.at(index);          // Object is marked
 }
 
-inline bool ZLiveMap::set_atomic(size_t index, bool finalizable, bool& inc_live) {
+inline bool ZLiveMap::set(size_t index, bool finalizable, bool& inc_live) {
   if (!is_marked()) {
     // First object to be marked during this
     // cycle, reset marking information.
@@ -119,7 +120,7 @@
   return _bitmap.par_set_bit_pair(index, finalizable, inc_live);
 }
 
-inline void ZLiveMap::inc_live_atomic(uint32_t objects, size_t bytes) {
+inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) {
   Atomic::add(objects, &_live_objects);
   Atomic::add(bytes, &_live_bytes);
 }
--- a/src/hotspot/share/gc/z/zMarkCache.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zMarkCache.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -44,7 +44,7 @@
 inline void ZMarkCacheEntry::evict() {
   if (_page != NULL) {
     // Write cached data out to page
-    _page->inc_live_atomic(_objects, _bytes);
+    _page->inc_live(_objects, _bytes);
     _page = NULL;
   }
 }
--- a/src/hotspot/share/gc/z/zMarkStack.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zMarkStack.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -62,8 +62,8 @@
 
   bool is_empty() const;
 
-  void push_atomic(T* stack);
-  T* pop_atomic();
+  void push(T* stack);
+  T* pop();
 };
 
 typedef ZStack<ZMarkStackEntry, ZMarkStackSlots>     ZMarkStack;
--- a/src/hotspot/share/gc/z/zMarkStack.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zMarkStack.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -114,7 +114,7 @@
 }
 
 template <typename T>
-inline void ZStackList<T>::push_atomic(T* stack) {
+inline void ZStackList<T>::push(T* stack) {
   T* vstack = _head;
   uint32_t version = 0;
 
@@ -133,7 +133,7 @@
 }
 
 template <typename T>
-inline T* ZStackList<T>::pop_atomic() {
+inline T* ZStackList<T>::pop() {
   T* vstack = _head;
   T* stack = NULL;
   uint32_t version = 0;
@@ -168,20 +168,20 @@
   // contention between mutators and GC workers as much as possible, while
   // still allowing GC workers to help out and steal work from each other.
   if (publish) {
-    _published.push_atomic(stack);
+    _published.push(stack);
   } else {
-    _overflowed.push_atomic(stack);
+    _overflowed.push(stack);
   }
 }
 
 inline ZMarkStack* ZMarkStripe::steal_stack() {
   // Steal overflowed stacks first, then published stacks
-  ZMarkStack* const stack = _overflowed.pop_atomic();
+  ZMarkStack* const stack = _overflowed.pop();
   if (stack != NULL) {
     return stack;
   }
 
-  return _published.pop_atomic();
+  return _published.pop();
 }
 
 inline size_t ZMarkStripeSet::nstripes() const {
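
The push()/pop() bodies, only partly visible in these hunks, CAS a versioned
head pointer so that a stack popped and re-pushed concurrently cannot cause
ABA. A hedged standalone sketch of the same idea, packing the version next to
the pointer in a 16-byte atomic rather than into spare pointer bits as the
real ZStackList does; it assumes nodes are never freed while pop() can still
race on them, which holds for mark stacks within a cycle:

#include <atomic>
#include <cstdint>

struct Node { Node* next = nullptr; };

struct VersionedHead {
  Node*    top;
  uint64_t version;  // Bumped on every successful update to defeat ABA
};

std::atomic<VersionedHead> head{VersionedHead{nullptr, 0}};

void push(Node* n) {
  VersionedHead h = head.load();
  do {
    n->next = h.top;
  } while (!head.compare_exchange_weak(h, VersionedHead{n, h.version + 1}));
}

Node* pop() {
  VersionedHead h = head.load();
  while (h.top != nullptr) {
    if (head.compare_exchange_weak(h, VersionedHead{h.top->next, h.version + 1})) {
      return h.top;  // The version bump makes an intervening pop/re-push fail the CAS
    }
  }
  return nullptr;
}
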
--- a/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -166,7 +166,7 @@
 
 ZMarkStackMagazine* ZMarkStackAllocator::alloc_magazine() {
   // Try allocating from the free list first
-  ZMarkStackMagazine* const magazine = _freelist.pop_atomic();
+  ZMarkStackMagazine* const magazine = _freelist.pop();
   if (magazine != NULL) {
     return magazine;
   }
@@ -181,5 +181,5 @@
 }
 
 void ZMarkStackAllocator::free_magazine(ZMarkStackMagazine* magazine) {
-  _freelist.push_atomic(magazine);
+  _freelist.push(magazine);
 }
--- a/src/hotspot/share/gc/z/zPage.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zPage.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -96,7 +96,7 @@
   bool is_object_strongly_live(uintptr_t addr) const;
   bool mark_object(uintptr_t addr, bool finalizable, bool& inc_live);
 
-  void inc_live_atomic(uint32_t objects, size_t bytes);
+  void inc_live(uint32_t objects, size_t bytes);
   uint32_t live_objects() const;
   size_t live_bytes() const;
 
--- a/src/hotspot/share/gc/z/zPage.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zPage.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -207,11 +207,11 @@
 
   // Set mark bit
   const size_t index = ((ZAddress::offset(addr) - start()) >> object_alignment_shift()) * 2;
-  return _livemap.set_atomic(index, finalizable, inc_live);
+  return _livemap.set(index, finalizable, inc_live);
 }
 
-inline void ZPage::inc_live_atomic(uint32_t objects, size_t bytes) {
-  _livemap.inc_live_atomic(objects, bytes);
+inline void ZPage::inc_live(uint32_t objects, size_t bytes) {
+  _livemap.inc_live(objects, bytes);
 }
 
 inline uint32_t ZPage::live_objects() const {
--- a/src/hotspot/share/gc/z/zUnload.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zUnload.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -36,7 +36,8 @@
 #include "gc/z/zUnload.hpp"
 #include "oops/access.inline.hpp"
 
-static const ZStatSubPhase ZSubPhaseConcurrentClassesUnload("Concurrent Classes Unload");
+static const ZStatSubPhase ZSubPhaseConcurrentClassesUnlink("Concurrent Classes Unlink");
+static const ZStatSubPhase ZSubPhaseConcurrentClassesPurge("Concurrent Classes Purge");
 
 class ZIsUnloadingOopClosure : public OopClosure {
 private:
@@ -126,6 +127,11 @@
 }
 
 void ZUnload::unlink() {
+  if (!ClassUnloading) {
+    return;
+  }
+
+  ZStatTimer timer(ZSubPhaseConcurrentClassesUnlink);
   SuspendibleThreadSetJoiner sts;
   bool unloading_occurred;
 
@@ -135,13 +141,17 @@
   }
 
   Klass::clean_weak_klass_links(unloading_occurred);
-
   ZNMethod::unlink(_workers, unloading_occurred);
-
   DependencyContext::cleaning_end();
 }
 
 void ZUnload::purge() {
+  if (!ClassUnloading) {
+    return;
+  }
+
+  ZStatTimer timer(ZSubPhaseConcurrentClassesPurge);
+
   {
     SuspendibleThreadSetJoiner sts;
     ZNMethod::purge(_workers);
@@ -151,29 +161,6 @@
   CodeCache::purge_exception_caches();
 }
 
-class ZUnloadRendezvousClosure : public ThreadClosure {
-public:
-  void do_thread(Thread* thread) {}
-};
-
-void ZUnload::unload() {
-  if (!ClassUnloading) {
-    return;
-  }
-
-  ZStatTimer timer(ZSubPhaseConcurrentClassesUnload);
-
-  // Unlink stale metadata and nmethods
-  unlink();
-
-  // Make sure stale metadata and nmethods are no longer observable
-  ZUnloadRendezvousClosure cl;
-  Handshake::execute(&cl);
-
-  // Purge stale metadata and nmethods that were unlinked
-  purge();
-}
-
 void ZUnload::finish() {
   // Resize and verify metaspace
   MetaspaceGC::compute_new_size();
--- a/src/hotspot/share/gc/z/zUnload.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/gc/z/zUnload.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,14 +30,12 @@
 private:
   ZWorkers* const _workers;
 
-  void unlink();
-  void purge();
-
 public:
   ZUnload(ZWorkers* workers);
 
   void prepare();
-  void unload();
+  void unlink();
+  void purge();
   void finish();
 };
 
--- a/src/hotspot/share/include/cds.h	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/include/cds.h	Thu Nov 14 13:50:03 2019 +0000
@@ -33,26 +33,29 @@
 //
 // Also, this is a C header file. Do not use C++ here.
 
-#define NUM_CDS_REGIONS 8 // this must be the same as MetaspaceShared::n_regions
+#define NUM_CDS_REGIONS 9 // this must be the same as MetaspaceShared::n_regions
 #define CDS_ARCHIVE_MAGIC 0xf00baba2
 #define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8
-#define CURRENT_CDS_ARCHIVE_VERSION 8
+#define CURRENT_CDS_ARCHIVE_VERSION 9
 #define INVALID_CDS_ARCHIVE_VERSION -1
 
 struct CDSFileMapRegion {
-  int        _crc;            // crc checksum of the current space
-  size_t     _file_offset;    // sizeof(this) rounded to vm page size
-  union {
-    char*    _base;           // copy-on-write base address
-    size_t   _offset;         // offset from the compressed oop encoding base, only used
-                              // by archive heap space
-  } _addr;
-  size_t     _used;           // for setting space top on read
-  int        _read_only;      // read only space?
-  int        _allow_exec;     // executable code in space?
-  void*      _oopmap;         // bitmap for relocating embedded oops
-  size_t     _oopmap_size_in_bits;
-  int        _is_heap_region; // used in debug build only.
+  int     _crc;               // CRC checksum of this region.
+  int     _read_only;         // read only region?
+  int     _allow_exec;        // executable code in this region?
+  int     _is_heap_region;    // Used by SA and debug build.
+  int     _is_bitmap_region;  // Relocation bitmap for RO/RW/MC/MD regions (used by SA and debug build).
+  int     _mapped_from_file;  // Is this region mapped from a file?
+                              // If false, this region was initialized using os::read().
+  size_t  _file_offset;       // Data for this region starts at this offset in the archive file.
+  size_t  _mapping_offset;    // This region should be mapped at this offset from the base address
+                              // - for non-heap regions, the base address is SharedBaseAddress
+                              // - for heap regions, the base address is the compressed oop encoding base
+  size_t  _used;              // Number of bytes actually used by this region (excluding padding bytes added
+                              // for alignment purposes).
+  size_t  _oopmap_offset;     // Bitmap for relocating embedded oops (offset from SharedBaseAddress).
+  size_t  _oopmap_size_in_bits;
+  char*   _mapped_base;       // Actually mapped address (NULL if this region is not mapped).
 };
 
 struct CDSFileMapHeaderBase {
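
The union of base address and offset is gone: every region now records
_mapping_offset relative to a base that depends on the region kind. A hedged
C sketch of how a reader could turn the new fields back into an address
(illustrative only; the JDK's actual mapping code and its way of identifying
heap regions differ):

/* Assumes the CDSFileMapRegion definition above. */
static char* region_requested_addr(const struct CDSFileMapRegion* r,
                                   char* shared_base,
                                   char* heap_oop_base) {
  /* Heap regions are relative to the compressed-oop encoding base,
   * everything else to SharedBaseAddress. */
  char* base = r->_is_heap_region ? heap_oop_base : shared_base;
  return base + r->_mapping_offset;
}
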
--- a/src/hotspot/share/interpreter/abstractInterpreter.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/abstractInterpreter.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -267,7 +267,8 @@
         }
         assert(!invoke_bc.has_index_u4(code), "sanity");
         int method_index = invoke_bc.get_index_u2_cpcache(code);
-        Method* resolved_method = ConstantPool::method_at_if_loaded(cpool, method_index);
+        constantPoolHandle cp(Thread::current(), cpool);
+        Method* resolved_method = ConstantPool::method_at_if_loaded(cp, method_index);
         return (resolved_method == NULL);
       }
       default: ShouldNotReachHere();
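
This hunk shows the changeset's recurring Method*/methodHandle shape: lookups
that cannot safepoint now traffic in raw Method* (or raw ConstantPool*), and
the caller wraps the pointer in a thread-attached handle only where a
safepoint, and therefore class unloading, is possible. A hedged fragment in
HotSpot idiom (not standalone; do_something_that_may_gc is hypothetical):

static void use_resolved_method(const constantPoolHandle& cp, int index, TRAPS) {
  // No safepoint between lookup and this use: a raw Method* is cheap and safe.
  Method* m = ConstantPool::method_at_if_loaded(cp, index);
  if (m == NULL) {
    return;  // Not resolved yet
  }
  // About to call code that may safepoint: a handle keeps m reachable.
  methodHandle mh(THREAD, m);
  do_something_that_may_gc(mh, CHECK);
}
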
--- a/src/hotspot/share/interpreter/bytecode.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/bytecode.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,7 +151,7 @@
 }
 
 
-methodHandle Bytecode_invoke::static_target(TRAPS) {
+Method* Bytecode_invoke::static_target(TRAPS) {
   constantPoolHandle constants(THREAD, this->constants());
 
   Bytecodes::Code bc = invoke_code();
@@ -160,8 +160,10 @@
 
 Handle Bytecode_invoke::appendix(TRAPS) {
   ConstantPoolCacheEntry* cpce = cpcache_entry();
-  if (cpce->has_appendix())
-    return Handle(THREAD, cpce->appendix_if_resolved(constants()));
+  if (cpce->has_appendix()) {
+    constantPoolHandle cp(THREAD, constants());
+    return Handle(THREAD, cpce->appendix_if_resolved(cp));
+  }
   return Handle();  // usual case
 }
 
--- a/src/hotspot/share/interpreter/bytecode.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/bytecode.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -209,7 +209,7 @@
   void verify() const;
 
   // Attributes
-  methodHandle static_target(TRAPS);             // "specified" method   (from constant pool)
+  Method* static_target(TRAPS);                  // "specified" method   (from constant pool)
   Handle       appendix(TRAPS);                  // if CPCE::has_appendix (from constant pool)
 
   // Testers
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2462,8 +2462,8 @@
         if (VerifyOops) method->verify();
 
         if (cache->has_appendix()) {
-          ConstantPool* constants = METHOD->constants();
-          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+          constantPoolHandle cp(THREAD, METHOD->constants());
+          SET_STACK_OBJECT(cache->appendix_if_resolved(cp), 0);
           MORE_STACK(1);
         }
 
@@ -2493,8 +2493,8 @@
         if (VerifyOops) method->verify();
 
         if (cache->has_appendix()) {
-          ConstantPool* constants = METHOD->constants();
-          SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
+          constantPoolHandle cp(THREAD, METHOD->constants());
+          SET_STACK_OBJECT(cache->appendix_if_resolved(cp), 0);
           MORE_STACK(1);
         }
 
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -356,7 +356,7 @@
 #ifdef CC_INTERP
 // Same as the legacy note_trap, but with more arguments.
 JRT_ENTRY(void, InterpreterRuntime::note_trap(JavaThread* thread, int reason, Method *method, int trap_bci))
-  methodHandle trap_method(method);
+  methodHandle trap_method(thread, method);
   note_trap_inner(thread, reason, trap_method, trap_bci, THREAD);
 JRT_END
 
@@ -897,7 +897,7 @@
       // (see also CallInfo::set_interface for details)
       assert(info.call_kind() == CallInfo::vtable_call ||
              info.call_kind() == CallInfo::direct_call, "");
-      methodHandle rm = info.resolved_method();
+      Method* rm = info.resolved_method();
       assert(rm->is_final() || info.has_vtable_index(),
              "should have been set already");
     } else if (!info.resolved_method()->has_itable_index()) {
@@ -921,25 +921,26 @@
   // methods must be checked for every call.
   InstanceKlass* sender = pool->pool_holder();
   sender = sender->is_unsafe_anonymous() ? sender->unsafe_anonymous_host() : sender;
+  methodHandle resolved_method(THREAD, info.resolved_method());
 
   switch (info.call_kind()) {
   case CallInfo::direct_call:
     cp_cache_entry->set_direct_call(
       bytecode,
-      info.resolved_method(),
+      resolved_method,
       sender->is_interface());
     break;
   case CallInfo::vtable_call:
     cp_cache_entry->set_vtable_call(
       bytecode,
-      info.resolved_method(),
+      resolved_method,
       info.vtable_index());
     break;
   case CallInfo::itable_call:
     cp_cache_entry->set_itable_call(
       bytecode,
       info.resolved_klass(),
-      info.resolved_method(),
+      resolved_method,
       info.itable_index());
     break;
   default:  ShouldNotReachHere();
--- a/src/hotspot/share/interpreter/linkResolver.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/linkResolver.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -140,8 +140,8 @@
   }
   _resolved_klass  = resolved_klass;
   _selected_klass  = resolved_klass;
-  _resolved_method = resolved_method;
-  _selected_method = resolved_method;
+  _resolved_method = methodHandle(THREAD, resolved_method);
+  _selected_method = methodHandle(THREAD, resolved_method);
   // classify:
   CallKind kind = CallInfo::unknown_kind;
   int index = resolved_method->vtable_index();
@@ -153,7 +153,7 @@
   } else if (!resolved_klass->is_interface()) {
     // A default or miranda method.  Compute the vtable index.
     index = LinkResolver::vtable_index_of_interface_method(resolved_klass,
-                           resolved_method);
+                           _resolved_method);
     assert(index >= 0 , "we should have valid vtable index at this point");
 
     kind = CallInfo::vtable_call;
@@ -189,9 +189,8 @@
 }
 
 void CallInfo::set_resolved_method_name(TRAPS) {
-  Method* m = _resolved_method();
-  assert(m != NULL, "Should already have a Method*");
-  oop rmethod_name = java_lang_invoke_ResolvedMethodName::find_resolved_method(m, CHECK);
+  assert(_resolved_method() != NULL, "Should already have a Method*");
+  oop rmethod_name = java_lang_invoke_ResolvedMethodName::find_resolved_method(_resolved_method, CHECK);
   _resolved_method_name = Handle(THREAD, rmethod_name);
 }
 
@@ -381,10 +380,10 @@
 
 // returns first instance method
 // Looks up method in classes, then looks up local default methods
-methodHandle LinkResolver::lookup_instance_method_in_klasses(Klass* klass,
-                                                             Symbol* name,
-                                                             Symbol* signature,
-                                                             Klass::PrivateLookupMode private_mode, TRAPS) {
+Method* LinkResolver::lookup_instance_method_in_klasses(Klass* klass,
+                                                        Symbol* name,
+                                                        Symbol* signature,
+                                                        Klass::PrivateLookupMode private_mode, TRAPS) {
   Method* result = klass->uncached_lookup_method(name, signature, Klass::find_overpass, private_mode);
 
   while (result != NULL && result->is_static() && result->method_holder()->super() != NULL) {
@@ -394,7 +393,7 @@
 
   if (klass->is_array_klass()) {
     // Only consider klass and super klass for arrays
-    return methodHandle(THREAD, result);
+    return result;
   }
 
   if (result == NULL) {
@@ -404,7 +403,7 @@
       assert(result == NULL || !result->is_static(), "static defaults not allowed");
     }
   }
-  return methodHandle(THREAD, result);
+  return result;
 }
 
 int LinkResolver::vtable_index_of_interface_method(Klass* klass,
@@ -441,10 +440,9 @@
   return ik->lookup_method_in_all_interfaces(cp_info.name(), cp_info.signature(), Klass::skip_defaults);
 }
 
-methodHandle LinkResolver::lookup_polymorphic_method(
-                                             const LinkInfo& link_info,
-                                             Handle *appendix_result_or_null,
-                                             TRAPS) {
+Method* LinkResolver::lookup_polymorphic_method(const LinkInfo& link_info,
+                                                Handle *appendix_result_or_null,
+                                                TRAPS) {
   Klass* klass = link_info.resolved_klass();
   Symbol* name = link_info.name();
   Symbol* full_signature = link_info.signature();
@@ -472,10 +470,10 @@
                       full_signature->as_C_string(),
                       basic_signature->as_C_string());
       }
-      methodHandle result = SystemDictionary::find_method_handle_intrinsic(iid,
+      Method* result = SystemDictionary::find_method_handle_intrinsic(iid,
                                                               basic_signature,
                                                               CHECK_NULL);
-      if (result.not_null()) {
+      if (result != NULL) {
         assert(result->is_method_handle_intrinsic(), "MH.invokeBasic or MH.linkTo* intrinsic");
         assert(result->intrinsic_id() != vmIntrinsics::_invokeGeneric, "wrong place to find this");
         assert(basic_signature == result->signature(), "predict the result signature");
@@ -505,7 +503,7 @@
 
       Handle appendix;
       Handle method_type;
-      methodHandle result = SystemDictionary::find_method_handle_invoker(
+      Method* result = SystemDictionary::find_method_handle_invoker(
                                                             klass,
                                                             name,
                                                             full_signature,
@@ -520,7 +518,7 @@
         if (appendix.is_null())  tty->print_cr("(none)");
         else                     appendix->print_on(tty);
       }
-      if (result.not_null()) {
+      if (result != NULL) {
 #ifdef ASSERT
         ResourceMark rm(THREAD);
 
@@ -603,8 +601,8 @@
   }
 }
 
-methodHandle LinkResolver::resolve_method_statically(Bytecodes::Code code,
-                                                     const constantPoolHandle& pool, int index, TRAPS) {
+Method* LinkResolver::resolve_method_statically(Bytecodes::Code code,
+                                                const constantPoolHandle& pool, int index, TRAPS) {
   // This method is used only
   // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
   // and
@@ -629,7 +627,7 @@
           MethodHandles::is_signature_polymorphic_name(resolved_klass, link_info.name()))) {
     Method* result = ConstantPool::method_at_if_loaded(pool, index);
     if (result != NULL) {
-      return methodHandle(THREAD, result);
+      return result;
     }
   }
 
@@ -714,8 +712,8 @@
   }
 }
 
-methodHandle LinkResolver::resolve_method(const LinkInfo& link_info,
-                                          Bytecodes::Code code, TRAPS) {
+Method* LinkResolver::resolve_method(const LinkInfo& link_info,
+                                     Bytecodes::Code code, TRAPS) {
 
   Handle nested_exception;
   Klass* resolved_klass = link_info.resolved_klass();
@@ -748,7 +746,8 @@
 
     if (resolved_method.is_null()) {
       // JSR 292:  see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc
-      resolved_method = lookup_polymorphic_method(link_info, (Handle*)NULL, THREAD);
+      Method* method = lookup_polymorphic_method(link_info, (Handle*)NULL, THREAD);
+      resolved_method = methodHandle(THREAD, method);
       if (HAS_PENDING_EXCEPTION) {
         nested_exception = Handle(THREAD, PENDING_EXCEPTION);
         CLEAR_PENDING_EXCEPTION;
@@ -783,13 +782,13 @@
     check_method_loader_constraints(link_info, resolved_method, "method", CHECK_NULL);
   }
 
-  return resolved_method;
+  return resolved_method();
 }
 
 static void trace_method_resolution(const char* prefix,
                                     Klass* klass,
                                     Klass* resolved_klass,
-                                    const methodHandle& method,
+                                    Method* method,
                                     bool logitables,
                                     int index = -1) {
 #ifndef PRODUCT
@@ -821,7 +820,7 @@
 }
 
 // Do linktime resolution of a method in the interface within the context of the specified bytecode.
-methodHandle LinkResolver::resolve_interface_method(const LinkInfo& link_info, Bytecodes::Code code, TRAPS) {
+Method* LinkResolver::resolve_interface_method(const LinkInfo& link_info, Bytecodes::Code code, TRAPS) {
 
   Klass* resolved_klass = link_info.resolved_klass();
 
@@ -892,11 +891,10 @@
     char buf[200];
     jio_snprintf(buf, sizeof(buf), "%s resolved interface method: caller-class:",
                  Bytecodes::name(code));
-    trace_method_resolution(buf, link_info.current_klass(), resolved_klass,
-                            resolved_method, true);
+    trace_method_resolution(buf, link_info.current_klass(), resolved_klass, resolved_method(), true);
   }
 
-  return resolved_method;
+  return resolved_method();
 }
 
 //------------------------------------------------------------------------------------------------------------------------
@@ -999,11 +997,11 @@
       }
 
       if (fd.constants()->pool_holder()->major_version() >= 53) {
-        methodHandle m = link_info.current_method();
-        assert(!m.is_null(), "information about the current method must be available for 'put' bytecodes");
+        Method* m = link_info.current_method();
+        assert(m != NULL, "information about the current method must be available for 'put' bytecodes");
         bool is_initialized_static_final_update = (byte == Bytecodes::_putstatic &&
                                                    fd.is_static() &&
-                                                   !m()->is_static_initializer());
+                                                   !m->is_static_initializer());
         bool is_initialized_instance_final_update = ((byte == Bytecodes::_putfield || byte == Bytecodes::_nofast_putfield) &&
                                                      !fd.is_static() &&
                                                      !m->is_object_initializer());
@@ -1011,7 +1009,7 @@
         if (is_initialized_static_final_update || is_initialized_instance_final_update) {
           ss.print("Update to %s final field %s.%s attempted from a different method (%s) than the initializer method %s ",
                    is_static ? "static" : "non-static", resolved_klass->external_name(), fd.name()->as_C_string(),
-                   m()->name()->as_C_string(),
+                   m->name()->as_C_string(),
                    is_static ? "<clinit>" : "<init>");
           THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), ss.as_string());
         }
@@ -1052,7 +1050,7 @@
 void LinkResolver::resolve_static_call(CallInfo& result,
                                        const LinkInfo& link_info,
                                        bool initialize_class, TRAPS) {
-  methodHandle resolved_method = linktime_resolve_static_method(link_info, CHECK);
+  Method* resolved_method = linktime_resolve_static_method(link_info, CHECK);
 
   // The resolved class can change as a result of this resolution.
   Klass* resolved_klass = resolved_method->method_holder();
@@ -1068,14 +1066,14 @@
   }
 
   // setup result
-  result.set_static(resolved_klass, resolved_method, CHECK);
+  result.set_static(resolved_klass, methodHandle(THREAD, resolved_method), CHECK);
 }
 
 // throws linktime exceptions
-methodHandle LinkResolver::linktime_resolve_static_method(const LinkInfo& link_info, TRAPS) {
+Method* LinkResolver::linktime_resolve_static_method(const LinkInfo& link_info, TRAPS) {
 
   Klass* resolved_klass = link_info.resolved_klass();
-  methodHandle resolved_method;
+  Method* resolved_method;
   if (!resolved_klass->is_interface()) {
     resolved_method = resolve_method(link_info, Bytecodes::_invokestatic, CHECK_NULL);
   } else {
@@ -1088,7 +1086,7 @@
     ResourceMark rm(THREAD);
     stringStream ss;
     ss.print("Expected static method '");
-    resolved_method()->print_external_name(&ss);
+    resolved_method->print_external_name(&ss);
     ss.print("'");
     THROW_MSG_NULL(vmSymbols::java_lang_IncompatibleClassChangeError(), ss.as_string());
   }
@@ -1100,13 +1098,12 @@
                                         Handle recv,
                                         const LinkInfo& link_info,
                                         TRAPS) {
-  methodHandle resolved_method = linktime_resolve_special_method(link_info, CHECK);
-  runtime_resolve_special_method(result, link_info, resolved_method, recv, CHECK);
+  Method* resolved_method = linktime_resolve_special_method(link_info, CHECK);
+  runtime_resolve_special_method(result, link_info, methodHandle(THREAD, resolved_method), recv, CHECK);
 }
 
 // throws linktime exceptions
-methodHandle LinkResolver::linktime_resolve_special_method(const LinkInfo& link_info,
-                                                           TRAPS) {
+Method* LinkResolver::linktime_resolve_special_method(const LinkInfo& link_info, TRAPS) {
 
   // Invokespecial is called for multiple special reasons:
   // <init>
@@ -1115,7 +1112,7 @@
   // and the selected method is recalculated relative to the direct superclass
   // superinterface.method, which explicitly does not check shadowing
   Klass* resolved_klass = link_info.resolved_klass();
-  methodHandle resolved_method;
+  Method* resolved_method;
 
   if (!resolved_klass->is_interface()) {
     resolved_method = resolve_method(link_info, Bytecodes::_invokespecial, CHECK_NULL);
@@ -1210,10 +1207,12 @@
         current_klass != resolved_klass) {
       // Lookup super method
       Klass* super_klass = current_klass->super();
-      sel_method = lookup_instance_method_in_klasses(super_klass,
+      Method* instance_method = lookup_instance_method_in_klasses(super_klass,
                                                      resolved_method->name(),
                                                      resolved_method->signature(),
                                                      Klass::find_private, CHECK);
+      sel_method = methodHandle(THREAD, instance_method);
+
       // check if found
       if (sel_method.is_null()) {
         ResourceMark rm(THREAD);
@@ -1272,7 +1271,7 @@
 
   if (log_develop_is_enabled(Trace, itables)) {
     trace_method_resolution("invokespecial selected method: resolved-class:",
-                            resolved_klass, resolved_klass, sel_method, true);
+                            resolved_klass, resolved_klass, sel_method(), true);
   }
 
   // setup result
@@ -1282,18 +1281,18 @@
 void LinkResolver::resolve_virtual_call(CallInfo& result, Handle recv, Klass* receiver_klass,
                                         const LinkInfo& link_info,
                                         bool check_null_and_abstract, TRAPS) {
-  methodHandle resolved_method = linktime_resolve_virtual_method(link_info, CHECK);
-  runtime_resolve_virtual_method(result, resolved_method,
+  Method* resolved_method = linktime_resolve_virtual_method(link_info, CHECK);
+  runtime_resolve_virtual_method(result, methodHandle(THREAD, resolved_method),
                                  link_info.resolved_klass(),
                                  recv, receiver_klass,
                                  check_null_and_abstract, CHECK);
 }
 
 // throws linktime exceptions
-methodHandle LinkResolver::linktime_resolve_virtual_method(const LinkInfo& link_info,
+Method* LinkResolver::linktime_resolve_virtual_method(const LinkInfo& link_info,
                                                            TRAPS) {
   // normal method resolution
-  methodHandle resolved_method = resolve_method(link_info, Bytecodes::_invokevirtual, CHECK_NULL);
+  Method* resolved_method = resolve_method(link_info, Bytecodes::_invokevirtual, CHECK_NULL);
 
   assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
   assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
@@ -1392,7 +1391,7 @@
 
   if (log_develop_is_enabled(Trace, vtables)) {
     trace_method_resolution("invokevirtual selected method: receiver-class:",
-                            recv_klass, resolved_klass, selected_method,
+                            recv_klass, resolved_klass, selected_method(),
                             false, vtable_index);
   }
   // setup result
@@ -1403,15 +1402,16 @@
                                           const LinkInfo& link_info,
                                           bool check_null_and_abstract, TRAPS) {
   // throws linktime exceptions
-  methodHandle resolved_method = linktime_resolve_interface_method(link_info, CHECK);
-  runtime_resolve_interface_method(result, resolved_method,link_info.resolved_klass(),
+  Method* resolved_method = linktime_resolve_interface_method(link_info, CHECK);
+  methodHandle mh(THREAD, resolved_method);
+  runtime_resolve_interface_method(result, mh, link_info.resolved_klass(),
                                    recv, recv_klass, check_null_and_abstract, CHECK);
 }
 
-methodHandle LinkResolver::linktime_resolve_interface_method(const LinkInfo& link_info,
+Method* LinkResolver::linktime_resolve_interface_method(const LinkInfo& link_info,
                                                              TRAPS) {
   // normal interface method resolution
-  methodHandle resolved_method = resolve_interface_method(link_info, Bytecodes::_invokeinterface, CHECK_NULL);
+  Method* resolved_method = resolve_interface_method(link_info, Bytecodes::_invokeinterface, CHECK_NULL);
   assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
   assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
 
@@ -1449,10 +1449,11 @@
     // This search must match the linktime preparation search for itable initialization
     // to correctly enforce loader constraints for interface method inheritance.
     // Private methods are skipped as the resolved method was not private.
-    selected_method = lookup_instance_method_in_klasses(recv_klass,
-                                                        resolved_method->name(),
-                                                        resolved_method->signature(),
-                                                        Klass::skip_private, CHECK);
+    Method* method = lookup_instance_method_in_klasses(recv_klass,
+                                                       resolved_method->name(),
+                                                       resolved_method->signature(),
+                                                       Klass::skip_private, CHECK);
+    selected_method = methodHandle(THREAD, method);
 
     if (selected_method.is_null() && !check_null_and_abstract) {
       // In theory this is a harmless placeholder value, but
@@ -1483,7 +1484,7 @@
 
   if (log_develop_is_enabled(Trace, itables)) {
     trace_method_resolution("invokeinterface selected method: receiver-class:",
-                            recv_klass, resolved_klass, selected_method, true);
+                            recv_klass, resolved_klass, selected_method(), true);
   }
   // setup result
   if (resolved_method->has_vtable_index()) {
@@ -1509,31 +1510,31 @@
 }
 
 
-methodHandle LinkResolver::linktime_resolve_interface_method_or_null(
+Method* LinkResolver::linktime_resolve_interface_method_or_null(
                                                  const LinkInfo& link_info) {
   EXCEPTION_MARK;
-  methodHandle method_result = linktime_resolve_interface_method(link_info, THREAD);
+  Method* method_result = linktime_resolve_interface_method(link_info, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
-    return methodHandle();
+    return NULL;
   } else {
     return method_result;
   }
 }
 
-methodHandle LinkResolver::linktime_resolve_virtual_method_or_null(
+Method* LinkResolver::linktime_resolve_virtual_method_or_null(
                                                  const LinkInfo& link_info) {
   EXCEPTION_MARK;
-  methodHandle method_result = linktime_resolve_virtual_method(link_info, THREAD);
+  Method* method_result = linktime_resolve_virtual_method(link_info, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
-    return methodHandle();
+    return NULL;
   } else {
     return method_result;
   }
 }
 
-methodHandle LinkResolver::resolve_virtual_call_or_null(
+Method* LinkResolver::resolve_virtual_call_or_null(
                                                  Klass* receiver_klass,
                                                  const LinkInfo& link_info) {
   EXCEPTION_MARK;
@@ -1541,12 +1542,12 @@
   resolve_virtual_call(info, Handle(), receiver_klass, link_info, false, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
-    return methodHandle();
+    return NULL;
   }
   return info.selected_method();
 }
 
-methodHandle LinkResolver::resolve_interface_call_or_null(
+Method* LinkResolver::resolve_interface_call_or_null(
                                                  Klass* receiver_klass,
                                                  const LinkInfo& link_info) {
   EXCEPTION_MARK;
@@ -1554,7 +1555,7 @@
   resolve_interface_call(info, Handle(), receiver_klass, link_info, false, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
-    return methodHandle();
+    return NULL;
   }
   return info.selected_method();
 }
@@ -1572,24 +1573,24 @@
   return info.vtable_index();
 }
 
-methodHandle LinkResolver::resolve_static_call_or_null(const LinkInfo& link_info) {
+Method* LinkResolver::resolve_static_call_or_null(const LinkInfo& link_info) {
   EXCEPTION_MARK;
   CallInfo info;
   resolve_static_call(info, link_info, /*initialize_class*/false, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
-    return methodHandle();
+    return NULL;
   }
   return info.selected_method();
 }
 
-methodHandle LinkResolver::resolve_special_call_or_null(const LinkInfo& link_info) {
+Method* LinkResolver::resolve_special_call_or_null(const LinkInfo& link_info) {
   EXCEPTION_MARK;
   CallInfo info;
   resolve_special_call(info, Handle(), link_info, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CLEAR_PENDING_EXCEPTION;
-    return methodHandle();
+    return NULL;
   }
   return info.selected_method();
 }
@@ -1690,8 +1691,8 @@
          resolved_klass == SystemDictionary::VarHandle_klass(), "");
   assert(MethodHandles::is_signature_polymorphic_name(link_info.name()), "");
   Handle       resolved_appendix;
-  methodHandle resolved_method = lookup_polymorphic_method(link_info, &resolved_appendix, CHECK);
-  result.set_handle(resolved_klass, resolved_method, resolved_appendix, CHECK);
+  Method* resolved_method = lookup_polymorphic_method(link_info, &resolved_appendix, CHECK);
+  result.set_handle(resolved_klass, methodHandle(THREAD, resolved_method), resolved_appendix, CHECK);
 }
 
 void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool, int indy_index, TRAPS) {
--- a/src/hotspot/share/interpreter/linkResolver.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/linkResolver.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -96,8 +96,8 @@
 
   Klass*  resolved_klass() const                 { return _resolved_klass; }
   Klass*  selected_klass() const                 { return _selected_klass; }
-  methodHandle resolved_method() const           { return _resolved_method; }
-  methodHandle selected_method() const           { return _selected_method; }
+  Method* resolved_method() const                { return _resolved_method(); }
+  Method* selected_method() const                { return _selected_method(); }
   Handle       resolved_appendix() const         { return _resolved_appendix; }
   Handle       resolved_method_name() const      { return _resolved_method_name; }
   // Materialize a java.lang.invoke.ResolvedMethodName for this resolved_method
@@ -181,7 +181,7 @@
   Symbol* signature() const          { return _signature; }
   Klass* resolved_klass() const      { return _resolved_klass; }
   Klass* current_klass() const       { return _current_klass; }
-  methodHandle current_method() const { return _current_method; }
+  Method* current_method() const     { return _current_method(); }
   constantTag tag() const            { return _tag; }
   bool check_access() const          { return _check_access; }
 
@@ -205,12 +205,12 @@
                                           bool in_imethod_resolve);
   static Method* lookup_method_in_interfaces(const LinkInfo& link_info);
 
-  static methodHandle lookup_polymorphic_method(const LinkInfo& link_info,
-                                                Handle *appendix_result_or_null, TRAPS);
+  static Method* lookup_polymorphic_method(const LinkInfo& link_info,
+                                           Handle *appendix_result_or_null, TRAPS);
  JVMCI_ONLY(public:) // Needed for CompilerToVM.resolveMethod()
   // Not Linktime so doesn't take LinkInfo
-  static methodHandle lookup_instance_method_in_klasses (Klass* klass, Symbol* name, Symbol* signature,
-                                                         Klass::PrivateLookupMode private_mode, TRAPS);
+  static Method* lookup_instance_method_in_klasses (Klass* klass, Symbol* name, Symbol* signature,
+                                                    Klass::PrivateLookupMode private_mode, TRAPS);
  JVMCI_ONLY(private:)
 
   // Similar loader constraint checking functions that throw
@@ -222,13 +222,13 @@
                                              Klass* current_klass,
                                              Klass* sel_klass, TRAPS);
 
-  static methodHandle resolve_interface_method(const LinkInfo& link_info, Bytecodes::Code code, TRAPS);
-  static methodHandle resolve_method          (const LinkInfo& link_info, Bytecodes::Code code, TRAPS);
+  static Method* resolve_interface_method(const LinkInfo& link_info, Bytecodes::Code code, TRAPS);
+  static Method* resolve_method          (const LinkInfo& link_info, Bytecodes::Code code, TRAPS);
 
-  static methodHandle linktime_resolve_static_method    (const LinkInfo& link_info, TRAPS);
-  static methodHandle linktime_resolve_special_method   (const LinkInfo& link_info, TRAPS);
-  static methodHandle linktime_resolve_virtual_method   (const LinkInfo& link_info, TRAPS);
-  static methodHandle linktime_resolve_interface_method (const LinkInfo& link_info, TRAPS);
+  static Method* linktime_resolve_static_method    (const LinkInfo& link_info, TRAPS);
+  static Method* linktime_resolve_special_method   (const LinkInfo& link_info, TRAPS);
+  static Method* linktime_resolve_virtual_method   (const LinkInfo& link_info, TRAPS);
+  static Method* linktime_resolve_interface_method (const LinkInfo& link_info, TRAPS);
 
   static void runtime_resolve_special_method    (CallInfo& result,
                                                  const LinkInfo& link_info,
@@ -285,9 +285,9 @@
 
   // static resolving calls (will not run any Java code);
   // used only from Bytecode_invoke::static_target
-  static methodHandle resolve_method_statically(Bytecodes::Code code,
-                                                const constantPoolHandle& pool,
-                                                int index, TRAPS);
+  static Method* resolve_method_statically(Bytecodes::Code code,
+                                           const constantPoolHandle& pool,
+                                           int index, TRAPS);
 
   static void resolve_field_access(fieldDescriptor& result,
                                    const constantPoolHandle& pool,
@@ -318,12 +318,12 @@
 
   // same as above for compile-time resolution; but returns null handle instead of throwing
   // an exception on error also, does not initialize klass (i.e., no side effects)
-  static methodHandle resolve_virtual_call_or_null  (Klass* receiver_klass,
-                                                     const LinkInfo& link_info);
-  static methodHandle resolve_interface_call_or_null(Klass* receiver_klass,
-                                                     const LinkInfo& link_info);
-  static methodHandle resolve_static_call_or_null   (const LinkInfo& link_info);
-  static methodHandle resolve_special_call_or_null  (const LinkInfo& link_info);
+  static Method* resolve_virtual_call_or_null(Klass* receiver_klass,
+                                              const LinkInfo& link_info);
+  static Method* resolve_interface_call_or_null(Klass* receiver_klass,
+                                                const LinkInfo& link_info);
+  static Method* resolve_static_call_or_null(const LinkInfo& link_info);
+  static Method* resolve_special_call_or_null(const LinkInfo& link_info);
 
   static int vtable_index_of_interface_method(Klass* klass, const methodHandle& resolved_method);
 
@@ -332,8 +332,8 @@
                                             const LinkInfo& link_info);
 
  // static resolving for compiler (does not throw exceptions, returns NULL if unsuccessful)
-  static methodHandle linktime_resolve_virtual_method_or_null  (const LinkInfo& link_info);
-  static methodHandle linktime_resolve_interface_method_or_null(const LinkInfo& link_info);
+  static Method* linktime_resolve_virtual_method_or_null  (const LinkInfo& link_info);
+  static Method* linktime_resolve_interface_method_or_null(const LinkInfo& link_info);
 
   // runtime resolving from constant pool
   static void resolve_invoke(CallInfo& result, Handle recv,
@@ -348,11 +348,11 @@
  public:
   // Only resolved method known.
   static void throw_abstract_method_error(const methodHandle& resolved_method, TRAPS) {
-    throw_abstract_method_error(resolved_method, NULL, NULL, CHECK);
+    throw_abstract_method_error(resolved_method, methodHandle(), NULL, CHECK);
   }
  // Resolved method and receiver klass known.
   static void throw_abstract_method_error(const methodHandle& resolved_method, Klass *recv_klass, TRAPS) {
-    throw_abstract_method_error(resolved_method, NULL, recv_klass, CHECK);
+    throw_abstract_method_error(resolved_method, methodHandle(), recv_klass, CHECK);
   }
   // Selected method is abstract.
   static void throw_abstract_method_error(const methodHandle& resolved_method,
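
A note on the calling convention above: with the implicit Method*-to-methodHandle conversion gone, an absent method argument is now spelled as an empty methodHandle() rather than NULL. A minimal sketch of how a caller exercises these overloads (illustrative only; resolved_method and recv_klass are assumed to be in scope under TRAPS):

    // Sketch: both convenience overloads forward with methodHandle()
    // standing in for "no selected method".
    void example(const methodHandle& resolved_method, Klass* recv_klass, TRAPS) {
      if (recv_klass == NULL) {
        LinkResolver::throw_abstract_method_error(resolved_method, CHECK);
      } else {
        LinkResolver::throw_abstract_method_error(resolved_method, recv_klass, CHECK);
      }
    }
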
--- a/src/hotspot/share/interpreter/rewriter.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/rewriter.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -80,13 +80,13 @@
 }
 
 // Unrewrite the bytecodes if an error occurs.
-void Rewriter::restore_bytecodes() {
+void Rewriter::restore_bytecodes(Thread* thread) {
   int len = _methods->length();
   bool invokespecial_error = false;
 
   for (int i = len-1; i >= 0; i--) {
     Method* method = _methods->at(i);
-    scan_method(method, true, &invokespecial_error);
+    scan_method(thread, method, true, &invokespecial_error);
     assert(!invokespecial_error, "reversing should not get an invokespecial error");
   }
 }
@@ -365,7 +365,7 @@
 
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(Method* method, bool reverse, bool* invokespecial_error) {
+void Rewriter::scan_method(Thread* thread, Method* method, bool reverse, bool* invokespecial_error) {
 
   int nof_jsrs = 0;
   bool has_monitor_bytecodes = false;
@@ -439,7 +439,7 @@
           // succeeded. Therefore, the class is guaranteed to be well-formed.
           InstanceKlass* klass = method->method_holder();
           u2 bc_index = Bytes::get_Java_u2(bcp + prefix_length + 1);
-          constantPoolHandle cp(method->constants());
+          constantPoolHandle cp(thread, method->constants());
           Symbol* ref_class_name = cp->klass_name_at(cp->klass_ref_index_at(bc_index));
 
           if (klass->name() == ref_class_name) {
@@ -548,7 +548,7 @@
 
   for (int i = len-1; i >= 0; i--) {
     Method* method = _methods->at(i);
-    scan_method(method, false, &invokespecial_error);
+    scan_method(THREAD, method, false, &invokespecial_error);
     if (invokespecial_error) {
      // If you get an error here, there is no reversing of bytecodes.
      // This exception is stored for this class and no further attempt is
@@ -570,7 +570,8 @@
     assert(!klass->is_shared(), "archive methods must not be rewritten at run time");
   }
   ResourceMark rm(THREAD);
-  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
+  constantPoolHandle cpool(THREAD, klass->constants());
+  Rewriter     rw(klass, cpool, klass->methods(), CHECK);
   // (That's all, folks.)
 }
 
@@ -592,7 +593,7 @@
 
   // Stress restoring bytecodes
   if (StressRewriter) {
-    restore_bytecodes();
+    restore_bytecodes(THREAD);
     rewrite_bytecodes(CHECK);
   }
 
@@ -602,7 +603,7 @@
   // Restore bytecodes to their unrewritten state if there are exceptions
   // rewriting bytecodes or allocating the cpCache
   if (HAS_PENDING_EXCEPTION) {
-    restore_bytecodes();
+    restore_bytecodes(THREAD);
     return;
   }
 
@@ -620,7 +621,7 @@
       // relocating bytecodes.  If some are relocated, that is ok because that
       // doesn't affect constant pool to cpCache rewriting.
       if (HAS_PENDING_EXCEPTION) {
-        restore_bytecodes();
+        restore_bytecodes(THREAD);
         return;
       }
       // Method might have gotten rewritten.
--- a/src/hotspot/share/interpreter/rewriter.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/interpreter/rewriter.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -184,7 +184,7 @@
 
   void compute_index_maps();
   void make_constant_pool_cache(TRAPS);
-  void scan_method(Method* m, bool reverse, bool* invokespecial_error);
+  void scan_method(Thread* thread, Method* m, bool reverse, bool* invokespecial_error);
   void rewrite_Object_init(const methodHandle& m, TRAPS);
   void rewrite_member_reference(address bcp, int offset, bool reverse);
   void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
@@ -198,7 +198,7 @@
   void rewrite_bytecodes(TRAPS);
 
   // Revert bytecodes in case of an exception.
-  void restore_bytecodes();
+  void restore_bytecodes(Thread* thread);
 
   static methodHandle rewrite_jsrs(const methodHandle& m, TRAPS);
  public:
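
The Thread* parameter threaded through scan_method() and restore_bytecodes() above exists because metadata handles (constantPoolHandle, methodHandle) now require an explicit thread at construction; the single-argument form used before this patch is gone. A hedged sketch of the pattern, assuming a TRAPS-style entry point where THREAD is in scope:

    // Sketch: explicit-thread handle construction, as used throughout this patch.
    void example(Method* m, TRAPS) {
      constantPoolHandle cp(THREAD, m->constants());  // was: constantPoolHandle cp(m->constants());
      methodHandle mh(THREAD, m);                     // was: methodHandle mh(m);
      // cp and mh are then dereferenced as before (cp->..., mh->...).
    }
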
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -28,7 +28,7 @@
 #include "jfr/leakprofiler/chains/edgeStore.hpp"
 #include "jfr/leakprofiler/chains/edgeUtils.hpp"
 #include "jfr/leakprofiler/utilities/unifiedOop.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oopsHierarchy.hpp"
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -141,7 +141,7 @@
 
     // Make sure it's resolved first
     CallInfo callInfo;
-    constantPoolHandle cp(holder->constants());
+    constantPoolHandle cp(THREAD, holder->constants());
     ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index, true));
     Bytecodes::Code invoke_code = bytecode.invoke_code();
     if (!cp_cache_entry->is_resolved(invoke_code)) {
@@ -157,7 +157,7 @@
     Handle appendix(THREAD, cp_cache_entry->appendix_if_resolved(cp));
     Klass *appendix_klass = appendix.is_null() ? NULL : appendix->klass();
 
-    methodHandle adapter_method(cp_cache_entry->f1_as_method());
+    methodHandle adapter_method(THREAD, cp_cache_entry->f1_as_method());
     InstanceKlass *adapter_klass = adapter_method->method_holder();
 
     if (appendix_klass != NULL && appendix_klass->is_instance_klass()) {
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -203,8 +203,8 @@
     result = JVMCIENV->get_jvmci_type(klass, JVMCI_CATCH);
   } else if (h->is_method()) {
     Method* method = (Method*) h;
-    methodHandle mh(method);
-    result = JVMCIENV->get_jvmci_method(method, JVMCI_CATCH);
+    methodHandle mh(THREAD, method);
+    result = JVMCIENV->get_jvmci_method(mh, JVMCI_CATCH);
   }
   jobject ref = JVMCIENV->get_jobject(result);
   record_meta_ref(ref, index);
@@ -501,8 +501,8 @@
       int length = JVMCIENV->get_length(methods);
       for (int i = 0; i < length; ++i) {
         JVMCIObject method_handle = JVMCIENV->get_object_at(methods, i);
-        methodHandle method = jvmci_env()->asMethod(method_handle);
-        _dependencies->assert_evol_method(method());
+        Method* method = jvmci_env()->asMethod(method_handle);
+        _dependencies->assert_evol_method(method);
       }
     }
   }
@@ -620,13 +620,15 @@
       jvmci_env()->set_compile_state(compile_state);
     }
 
-    methodHandle method = jvmci_env()->asMethod(jvmci_env()->get_HotSpotCompiledNmethod_method(compiled_code));
+    Thread* thread = Thread::current();
+
+    methodHandle method(thread, jvmci_env()->asMethod(jvmci_env()->get_HotSpotCompiledNmethod_method(compiled_code)));
     jint entry_bci = jvmci_env()->get_HotSpotCompiledNmethod_entryBCI(compiled_code);
     bool has_unsafe_access = jvmci_env()->get_HotSpotCompiledNmethod_hasUnsafeAccess(compiled_code) == JNI_TRUE;
     jint id = jvmci_env()->get_HotSpotCompiledNmethod_id(compiled_code);
     if (id == -1) {
       // Make sure a valid compile_id is associated with every compile
-      id = CompileBroker::assign_compile_id_unlocked(Thread::current(), method, entry_bci);
+      id = CompileBroker::assign_compile_id_unlocked(thread, method, entry_bci);
       jvmci_env()->set_HotSpotCompiledNmethod_id(compiled_code, id);
     }
     if (!jvmci_env()->isa_HotSpotNmethod(installed_code)) {
@@ -659,7 +661,8 @@
 void CodeInstaller::initialize_fields(JVMCIObject target, JVMCIObject compiled_code, JVMCI_TRAPS) {
   if (jvmci_env()->isa_HotSpotCompiledNmethod(compiled_code)) {
     JVMCIObject hotspotJavaMethod = jvmci_env()->get_HotSpotCompiledNmethod_method(compiled_code);
-    methodHandle method = jvmci_env()->asMethod(hotspotJavaMethod);
+    Thread* thread = Thread::current();
+    methodHandle method(thread, jvmci_env()->asMethod(hotspotJavaMethod));
     _parameter_count = method->size_of_parameters();
     TRACE_jvmci_2("installing code for %s", method->name_and_sig_as_C_string());
   } else {
@@ -937,10 +940,10 @@
   JVMCIObject impl_handle = jvmci_env()->get_Assumptions_ConcreteMethod_impl(assumption);
   JVMCIObject context_handle = jvmci_env()->get_Assumptions_ConcreteMethod_context(assumption);
 
-  methodHandle impl = jvmci_env()->asMethod(impl_handle);
+  Method* impl = jvmci_env()->asMethod(impl_handle);
   Klass* context = jvmci_env()->asKlass(context_handle);
 
-  _dependencies->assert_unique_concrete_method(context, impl());
+  _dependencies->assert_unique_concrete_method(context, impl);
 }
 
 void CodeInstaller::assumption_CallSiteTargetValue(JVMCIObject assumption, JVMCI_TRAPS) {
@@ -1064,7 +1067,8 @@
   }
 
   JVMCIObject hotspot_method = jvmci_env()->get_BytecodePosition_method(position);
-  Method* method = jvmci_env()->asMethod(hotspot_method);
+  Thread* thread = Thread::current();
+  methodHandle method(thread, jvmci_env()->asMethod(hotspot_method));
   jint bci = map_jvmci_bci(jvmci_env()->get_BytecodePosition_bci(position));
   if (bci == jvmci_env()->get_BytecodeFrame_BEFORE_BCI()) {
     bci = SynchronizationEntryBCI;
@@ -1077,7 +1081,7 @@
     if (bci < 0){
        reexecute = false;
     } else {
-      Bytecodes::Code code = Bytecodes::java_code_at(method, method->bcp_from(bci));
+      Bytecodes::Code code = Bytecodes::java_code_at(method(), method->bcp_from(bci));
       reexecute = bytecode_should_reexecute(code);
       if (frame.is_non_null()) {
         reexecute = (jvmci_env()->get_BytecodeFrame_duringCall(frame) == JNI_FALSE);
--- a/src/hotspot/share/jvmci/jvmciCompiler.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompiler.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -67,7 +67,7 @@
   // Initialize compile queue with a selected set of methods.
   int len = objectMethods->length();
   for (int i = 0; i < len; i++) {
-    methodHandle mh = objectMethods->at(i);
+    methodHandle mh(THREAD, objectMethods->at(i));
     if (!mh->is_native() && !mh->is_static() && !mh->is_initializer()) {
       ResourceMark rm;
       int hot_count = 10; // TODO: what's the appropriate value?
@@ -100,7 +100,7 @@
   JVMCI::compiler_runtime()->bootstrap_finished(CHECK);
 }
 
-bool JVMCICompiler::force_comp_at_level_simple(Method *method) {
+bool JVMCICompiler::force_comp_at_level_simple(const methodHandle& method) {
   if (UseJVMCINativeLibrary) {
     // This mechanism exists to force compilation of a JVMCI compiler by C1
    // to reduce the compilation time spent on the JVMCI compiler itself. In
--- a/src/hotspot/share/jvmci/jvmciCompiler.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompiler.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -84,7 +84,7 @@
   void bootstrap(TRAPS);
 
   // Should force compilation of method at CompLevel_simple?
-  bool force_comp_at_level_simple(Method* method);
+  bool force_comp_at_level_simple(const methodHandle& method);
 
   bool is_bootstrapping() const { return _bootstrapping; }
 
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -34,6 +34,8 @@
 #include "jvmci/jvmciCompilerToVM.hpp"
 #include "jvmci/jvmciCodeInstaller.hpp"
 #include "jvmci/jvmciRuntime.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/universe.hpp"
 #include "oops/constantPool.inline.hpp"
@@ -252,7 +254,7 @@
 C2V_END
 
 C2V_VMENTRY_NULL(jbyteArray, getBytecode, (JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
 
   int code_size = method->code_size();
   jbyte* reconstituted_code = NEW_RESOURCE_ARRAY(jbyte, code_size);
@@ -330,12 +332,12 @@
 C2V_END
 
 C2V_VMENTRY_0(jint, getExceptionTableLength, (JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  Method* method = JVMCIENV->asMethod(jvmci_method);
   return method->exception_table_length();
 C2V_END
 
 C2V_VMENTRY_0(jlong, getExceptionTableStart, (JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  Method* method = JVMCIENV->asMethod(jvmci_method);
   if (method->exception_table_length() == 0) {
     return 0L;
   }
@@ -357,13 +359,13 @@
     slot = java_lang_reflect_Method::slot(executable);
   }
   Klass* holder = java_lang_Class::as_Klass(mirror);
-  methodHandle method = InstanceKlass::cast(holder)->method_with_idnum(slot);
+  methodHandle method(THREAD, InstanceKlass::cast(holder)->method_with_idnum(slot));
   JVMCIObject result = JVMCIENV->get_jvmci_method(method, JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(result);
 }
 
 C2V_VMENTRY_NULL(jobject, getResolvedJavaMethod, (JNIEnv* env, jobject, jobject base, jlong offset))
-  methodHandle method;
+  Method* method = NULL;
   JVMCIObject base_object = JVMCIENV->wrap(base);
   if (base_object.is_null()) {
     method = *((Method**)(offset));
@@ -377,16 +379,16 @@
   } else if (JVMCIENV->isa_HotSpotResolvedJavaMethodImpl(base_object)) {
     method = JVMCIENV->asMethod(base_object);
   }
-  if (method.is_null()) {
+  if (method == NULL) {
     JVMCI_THROW_MSG_NULL(IllegalArgumentException, err_msg("Unexpected type: %s", JVMCIENV->klass_name(base_object)));
   }
-  assert (method.is_null() || method->is_method(), "invalid read");
-  JVMCIObject result = JVMCIENV->get_jvmci_method(method, JVMCI_CHECK_NULL);
+  assert (method->is_method(), "invalid read");
+  JVMCIObject result = JVMCIENV->get_jvmci_method(methodHandle(THREAD, method), JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(result);
 }
 
 C2V_VMENTRY_NULL(jobject, getConstantPool, (JNIEnv* env, jobject, jobject object_handle))
-  constantPoolHandle cp;
+  ConstantPool* cp = NULL;
   JVMCIObject object = JVMCIENV->wrap(object_handle);
   if (object.is_null()) {
     JVMCI_THROW_NULL(NullPointerException);
@@ -399,9 +401,9 @@
     JVMCI_THROW_MSG_NULL(IllegalArgumentException,
                 err_msg("Unexpected type: %s", JVMCIENV->klass_name(object)));
   }
-  assert(!cp.is_null(), "npe");
+  assert(cp != NULL, "npe");
 
-  JVMCIObject result = JVMCIENV->get_jvmci_constant_pool(cp, JVMCI_CHECK_NULL);
+  JVMCIObject result = JVMCIENV->get_jvmci_constant_pool(constantPoolHandle(THREAD, cp), JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(result);
 }
 
@@ -449,7 +451,7 @@
 }
 
 C2V_VMENTRY_NULL(jobject, findUniqueConcreteMethod, (JNIEnv* env, jobject, jobject jvmci_type, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   Klass* holder = JVMCIENV->asKlass(jvmci_type);
   if (holder->is_interface()) {
     JVMCI_THROW_MSG_NULL(InternalError, err_msg("Interface %s should be handled in Java code", holder->external_name()));
@@ -461,7 +463,7 @@
   methodHandle ucm;
   {
     MutexLocker locker(Compile_lock);
-    ucm = Dependencies::find_unique_concrete_method(holder, method());
+    ucm = methodHandle(THREAD, Dependencies::find_unique_concrete_method(holder, method()));
   }
   JVMCIObject result = JVMCIENV->get_jvmci_method(ucm, JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(result);
@@ -485,25 +487,25 @@
 C2V_END
 
 C2V_VMENTRY_0(jboolean, methodIsIgnoredBySecurityStackWalk,(JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  Method* method = JVMCIENV->asMethod(jvmci_method);
   return method->is_ignored_by_security_stack_walk();
 C2V_END
 
 C2V_VMENTRY_0(jboolean, isCompilable,(JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
-  constantPoolHandle cp = method->constMethod()->constants();
-  assert(!cp.is_null(), "npe");
+  Method* method = JVMCIENV->asMethod(jvmci_method);
+  ConstantPool* cp = method->constMethod()->constants();
+  assert(cp != NULL, "npe");
   // don't inline method when constant pool contains a CONSTANT_Dynamic
   return !method->is_not_compilable(CompLevel_full_optimization) && !cp->has_dynamic_constant();
 C2V_END
 
 C2V_VMENTRY_0(jboolean, hasNeverInlineDirective,(JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   return !Inline || CompilerOracle::should_not_inline(method) || method->dont_inline();
 C2V_END
 
 C2V_VMENTRY_0(jboolean, shouldInlineMethod,(JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   return CompilerOracle::should_inline(method) || method->force_inline();
 C2V_END
 
@@ -611,35 +613,35 @@
 }
 
 C2V_VMENTRY_NULL(jobject, resolvePossiblyCachedConstantInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   oop result = cp->resolve_possibly_cached_constant_at(index, CHECK_NULL);
   return JVMCIENV->get_jobject(JVMCIENV->get_object_constant(result));
 C2V_END
 
 C2V_VMENTRY_0(jint, lookupNameAndTypeRefIndexInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   return cp->name_and_type_ref_index_at(index);
 C2V_END
 
 C2V_VMENTRY_NULL(jobject, lookupNameInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint which))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   JVMCIObject sym = JVMCIENV->create_string(cp->name_ref_at(which), JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(sym);
 C2V_END
 
 C2V_VMENTRY_NULL(jobject, lookupSignatureInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint which))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   JVMCIObject sym = JVMCIENV->create_string(cp->signature_ref_at(which), JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(sym);
 C2V_END
 
 C2V_VMENTRY_0(jint, lookupKlassRefIndexInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   return cp->klass_ref_index_at(index);
 C2V_END
 
 C2V_VMENTRY_NULL(jobject, resolveTypeInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   Klass* klass = cp->klass_at(index, CHECK_NULL);
   JVMCIKlassHandle resolved_klass(THREAD, klass);
   if (resolved_klass->is_instance_klass()) {
@@ -654,7 +656,7 @@
 C2V_END
 
 C2V_VMENTRY_NULL(jobject, lookupKlassInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index, jbyte opcode))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   Klass* loading_klass = cp->pool_holder();
   bool is_accessible = false;
   JVMCIKlassHandle klass(THREAD, JVMCIRuntime::get_klass_by_index(cp, index, is_accessible, loading_klass));
@@ -682,30 +684,31 @@
 C2V_END
 
 C2V_VMENTRY_NULL(jobject, lookupAppendixInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   oop appendix_oop = ConstantPool::appendix_at_if_loaded(cp, index);
   return JVMCIENV->get_jobject(JVMCIENV->get_object_constant(appendix_oop));
 C2V_END
 
 C2V_VMENTRY_NULL(jobject, lookupMethodInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index, jbyte opcode))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   InstanceKlass* pool_holder = cp->pool_holder();
   Bytecodes::Code bc = (Bytecodes::Code) (((int) opcode) & 0xFF);
-  methodHandle method = JVMCIRuntime::get_method_by_index(cp, index, bc, pool_holder);
+  methodHandle method(THREAD, JVMCIRuntime::get_method_by_index(cp, index, bc, pool_holder));
   JVMCIObject result = JVMCIENV->get_jvmci_method(method, JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(result);
 C2V_END
 
 C2V_VMENTRY_0(jint, constantPoolRemapInstructionOperandFromCache, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   return cp->remap_instruction_operand_from_cache(index);
 C2V_END
 
 C2V_VMENTRY_NULL(jobject, resolveFieldInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index, jobject jvmci_method, jbyte opcode, jintArray info_handle))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   Bytecodes::Code code = (Bytecodes::Code)(((int) opcode) & 0xFF);
   fieldDescriptor fd;
-  LinkInfo link_info(cp, index, (jvmci_method != NULL) ? JVMCIENV->asMethod(jvmci_method) : NULL, CHECK_0);
+  methodHandle mh(THREAD, (jvmci_method != NULL) ? JVMCIENV->asMethod(jvmci_method) : NULL);
+  LinkInfo link_info(cp, index, mh, CHECK_0);
   LinkResolver::resolve_field(fd, link_info, Bytecodes::java_code(code), false, CHECK_0);
   JVMCIPrimitiveArray info = JVMCIENV->wrap(info_handle);
   if (info.is_null() || JVMCIENV->get_length(info) != 3) {
@@ -721,7 +724,7 @@
 
 C2V_VMENTRY_0(jint, getVtableIndexForInterfaceMethod, (JNIEnv* env, jobject, jobject jvmci_type, jobject jvmci_method))
   Klass* klass = JVMCIENV->asKlass(jvmci_type);
-  Method* method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   if (klass->is_interface()) {
     JVMCI_THROW_MSG_0(InternalError, err_msg("Interface %s should be handled in Java code", klass->external_name()));
   }
@@ -740,7 +743,7 @@
 C2V_VMENTRY_NULL(jobject, resolveMethod, (JNIEnv* env, jobject, jobject receiver_jvmci_type, jobject jvmci_method, jobject caller_jvmci_type))
   Klass* recv_klass = JVMCIENV->asKlass(receiver_jvmci_type);
   Klass* caller_klass = JVMCIENV->asKlass(caller_jvmci_type);
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
 
   Klass* resolved     = method->method_holder();
   Symbol* h_name      = method->name();
@@ -763,7 +766,7 @@
   }
 
   LinkInfo link_info(resolved, h_name, h_signature, caller_klass);
-  methodHandle m;
+  Method* m = NULL;
   // Only do exact lookup if receiver klass has been linked.  Otherwise,
   // the vtable has not been setup, and the LinkResolver will fail.
   if (recv_klass->is_array_klass() ||
@@ -775,12 +778,12 @@
     }
   }
 
-  if (m.is_null()) {
+  if (m == NULL) {
     // Return NULL if there was a problem with lookup (uninitialized class, etc.)
     return NULL;
   }
 
-  JVMCIObject result = JVMCIENV->get_jvmci_method(m, JVMCI_CHECK_NULL);
+  JVMCIObject result = JVMCIENV->get_jvmci_method(methodHandle(THREAD, m), JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(result);
 C2V_END
 
@@ -796,7 +799,8 @@
     return NULL;
   }
   InstanceKlass* iklass = InstanceKlass::cast(klass);
-  JVMCIObject result = JVMCIENV->get_jvmci_method(iklass->class_initializer(), JVMCI_CHECK_NULL);
+  methodHandle clinit(THREAD, iklass->class_initializer());
+  JVMCIObject result = JVMCIENV->get_jvmci_method(clinit, JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(result);
 C2V_END
 
@@ -811,7 +815,7 @@
 C2V_END
 
 C2V_VMENTRY(void, setNotInlinableOrCompilable,(JNIEnv* env, jobject,  jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   method->set_not_c1_compilable();
   method->set_not_c2_compilable();
   method->set_dont_inline(true);
@@ -1007,7 +1011,7 @@
 C2V_VMENTRY_NULL(jobject, getStackTraceElement, (JNIEnv* env, jobject, jobject jvmci_method, int bci))
   HandleMark hm;
 
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   JVMCIObject element = JVMCIENV->new_StackTraceElement(method, bci, JVMCI_CHECK_NULL);
   return JVMCIENV->get_jobject(element);
 C2V_END
@@ -1024,7 +1028,7 @@
   if (nm == NULL) {
     JVMCI_THROW_NULL(InvalidInstalledCodeException);
   }
-  methodHandle mh = nm->method();
+  methodHandle mh(THREAD, nm->method());
   Symbol* signature = mh->signature();
   JavaCallArguments jca(mh->size_of_parameters());
 
@@ -1102,7 +1106,7 @@
 C2V_END
 
 C2V_VMENTRY(void, reprofile, (JNIEnv* env, jobject, jobject jvmci_method))
-  Method* method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   MethodCounters* mcs = method->method_counters();
   if (mcs != NULL) {
     mcs->clear_counters();
@@ -1159,7 +1163,7 @@
   if (jvmci_method == NULL) {
     JVMCI_THROW_0(NullPointerException);
   }
-  Method* method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   if (entry_bci >= method->code_size() || entry_bci < -1) {
     JVMCI_THROW_MSG_0(IllegalArgumentException, err_msg("Unexpected bci %d", entry_bci));
   }
@@ -1201,7 +1205,7 @@
   LinkInfo link_info(spec_klass, name, signature);
   LinkResolver::resolve_interface_call(
           callinfo, receiver, recvrKlass, link_info, true, CHECK);
-  methodHandle method = callinfo.selected_method();
+  methodHandle method(THREAD, callinfo.selected_method());
   assert(method.not_null(), "should have thrown exception");
 
   // Invoke the method
@@ -1275,7 +1279,8 @@
 
             locals = cvf->locals();
             HotSpotJVMCI::HotSpotStackFrameReference::set_bci(JVMCIENV, frame_reference(), cvf->bci());
-            JVMCIObject method = JVMCIENV->get_jvmci_method(cvf->method(), JVMCI_CHECK_NULL);
+            methodHandle mh(THREAD, cvf->method());
+            JVMCIObject method = JVMCIENV->get_jvmci_method(mh, JVMCI_CHECK_NULL);
             HotSpotJVMCI::HotSpotStackFrameReference::set_method(JVMCIENV, frame_reference(), JNIHandles::resolve(method.as_jobject()));
           }
         }
@@ -1288,7 +1293,8 @@
           } else {
             locals = ivf->locals();
             HotSpotJVMCI::HotSpotStackFrameReference::set_bci(JVMCIENV, frame_reference(), ivf->bci());
-            JVMCIObject method = JVMCIENV->get_jvmci_method(ivf->method(), JVMCI_CHECK_NULL);
+            methodHandle mh(THREAD, ivf->method());
+            JVMCIObject method = JVMCIENV->get_jvmci_method(mh, JVMCI_CHECK_NULL);
             HotSpotJVMCI::HotSpotStackFrameReference::set_method(JVMCIENV, frame_reference(), JNIHandles::resolve(method.as_jobject()));
             HotSpotJVMCI::HotSpotStackFrameReference::set_localIsVirtual(JVMCIENV, frame_reference(), NULL);
           }
@@ -1368,7 +1374,7 @@
 C2V_END
 
 C2V_VMENTRY(void, resolveInvokeDynamicInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   CallInfo callInfo;
   LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, Bytecodes::_invokedynamic, CHECK);
   ConstantPoolCacheEntry* cp_cache_entry = cp->invokedynamic_cp_cache_entry_at(index);
@@ -1376,7 +1382,7 @@
 C2V_END
 
 C2V_VMENTRY(void, resolveInvokeHandleInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   Klass* holder = cp->klass_ref_at(index, CHECK);
   Symbol* name = cp->name_ref_at(index);
   if (MethodHandles::is_signature_polymorphic_name(holder, name)) {
@@ -1388,7 +1394,7 @@
 C2V_END
 
 C2V_VMENTRY_0(jint, isResolvedInvokeHandleInPool, (JNIEnv* env, jobject, jobject jvmci_constant_pool, jint index))
-  constantPoolHandle cp = JVMCIENV->asConstantPool(jvmci_constant_pool);
+  constantPoolHandle cp(THREAD, JVMCIENV->asConstantPool(jvmci_constant_pool));
   ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index));
   if (cp_cache_entry->is_resolved(Bytecodes::_invokehandle)) {
     // MethodHandle.invoke* --> LambdaForm?
@@ -1403,7 +1409,7 @@
     vmassert(MethodHandles::is_method_handle_invoke_name(resolved_klass, name_sym), "!");
     vmassert(MethodHandles::is_signature_polymorphic_name(resolved_klass, name_sym), "!");
 
-    methodHandle adapter_method(cp_cache_entry->f1_as_method());
+    methodHandle adapter_method(THREAD, cp_cache_entry->f1_as_method());
 
     methodHandle resolved_method(adapter_method);
 
@@ -1414,7 +1420,7 @@
       vmassert(!MethodHandles::is_signature_polymorphic_static(resolved_method->intrinsic_id()), "!");
       vmassert(cp_cache_entry->appendix_if_resolved(cp) == NULL, "!");
 
-      methodHandle m(LinkResolver::linktime_resolve_virtual_method_or_null(link_info));
+      methodHandle m(THREAD, LinkResolver::linktime_resolve_virtual_method_or_null(link_info));
       vmassert(m == resolved_method, "!!");
       return -1;
     }
@@ -1907,7 +1913,8 @@
   }
   JVMCIObjectArray methods = JVMCIENV->new_ResolvedJavaMethod_array(constructors_array.length(), JVMCI_CHECK_NULL);
   for (int i = 0; i < constructors_array.length(); i++) {
-    JVMCIObject method = JVMCIENV->get_jvmci_method(constructors_array.at(i), JVMCI_CHECK_NULL);
+    methodHandle ctor(THREAD, constructors_array.at(i));
+    JVMCIObject method = JVMCIENV->get_jvmci_method(ctor, JVMCI_CHECK_NULL);
     JVMCIENV->put_object_at(methods, i, method);
   }
   return JVMCIENV->get_jobjectArray(methods);
@@ -1936,7 +1943,8 @@
   }
   JVMCIObjectArray methods = JVMCIENV->new_ResolvedJavaMethod_array(methods_array.length(), JVMCI_CHECK_NULL);
   for (int i = 0; i < methods_array.length(); i++) {
-    JVMCIObject method = JVMCIENV->get_jvmci_method(methods_array.at(i), JVMCI_CHECK_NULL);
+    methodHandle mh(THREAD, methods_array.at(i));
+    JVMCIObject method = JVMCIENV->get_jvmci_method(mh, JVMCI_CHECK_NULL);
     JVMCIENV->put_object_at(methods, i, method);
   }
   return JVMCIENV->get_jobjectArray(methods);
@@ -2257,7 +2265,7 @@
 
   InstanceKlass* iklass = InstanceKlass::cast(klass);
   for (int i = 0; i < iklass->methods()->length(); i++) {
-    Method* method = iklass->methods()->at(i);
+    methodHandle method(THREAD, iklass->methods()->at(i));
     if (method->is_native()) {
 
       // Compute argument size
@@ -2296,11 +2304,9 @@
             method->name_and_sig_as_C_string(), p2i(method->native_function()), p2i(entry)));
       }
       method->set_native_function(entry, Method::native_bind_event_is_interesting);
-      if (PrintJNIResolving) {
-        tty->print_cr("[Dynamic-linking native method %s.%s ... JNI]",
-          method->method_holder()->external_name(),
-          method->name()->as_C_string());
-      }
+      log_debug(jni, resolve)("[Dynamic-linking native method %s.%s ... JNI]",
+                              method->method_holder()->external_name(),
+                              method->name()->as_C_string());
     }
   }
 
@@ -2425,7 +2431,7 @@
   JVMCIObject obj = thisEnv->wrap(obj_handle);
   JVMCIObject result;
   if (thisEnv->isa_HotSpotResolvedJavaMethodImpl(obj)) {
-    Method* method = thisEnv->asMethod(obj);
+    methodHandle method(THREAD, thisEnv->asMethod(obj));
     result = peerEnv->get_jvmci_method(method, JVMCI_CHECK_0);
   } else if (thisEnv->isa_HotSpotResolvedObjectTypeImpl(obj)) {
     Klass* klass = thisEnv->asKlass(obj);
@@ -2456,13 +2462,13 @@
     }
     if (result.is_null()) {
       JVMCIObject methodObject = thisEnv->get_HotSpotNmethod_method(obj);
-      methodHandle mh = thisEnv->asMethod(methodObject);
+      methodHandle mh(THREAD, thisEnv->asMethod(methodObject));
       jboolean isDefault = thisEnv->get_HotSpotNmethod_isDefault(obj);
       jlong compileIdSnapshot = thisEnv->get_HotSpotNmethod_compileIdSnapshot(obj);
       JVMCIObject name_string = thisEnv->get_InstalledCode_name(obj);
       const char* cstring = name_string.is_null() ? NULL : thisEnv->as_utf8_string(name_string);
       // Create a new HotSpotNmethod instance in the peer runtime
-      result = peerEnv->new_HotSpotNmethod(mh(), cstring, isDefault, compileIdSnapshot, JVMCI_CHECK_0);
+      result = peerEnv->new_HotSpotNmethod(mh, cstring, isDefault, compileIdSnapshot, JVMCI_CHECK_0);
       if (nm == NULL) {
         // nmethod must have been unloaded
       } else {
@@ -2522,7 +2528,7 @@
 
 C2V_VMENTRY_NULL(jobject, asReflectionExecutable, (JNIEnv* env, jobject, jobject jvmci_method))
   requireInHotSpot("asReflectionExecutable", JVMCI_CHECK_NULL);
-  methodHandle m = JVMCIENV->asMethod(jvmci_method);
+  methodHandle m(THREAD, JVMCIENV->asMethod(jvmci_method));
   oop executable;
   if (m->is_initializer()) {
     if (m->is_static_initializer()) {
@@ -2587,7 +2593,7 @@
 }
 
 C2V_VMENTRY_0(jlong, getFailedSpeculationsAddress, (JNIEnv* env, jobject, jobject jvmci_method))
-  methodHandle method = JVMCIENV->asMethod(jvmci_method);
+  methodHandle method(THREAD, JVMCIENV->asMethod(jvmci_method));
   MethodData* method_data = method->method_data();
   if (method_data == NULL) {
     ClassLoaderData* loader_data = method->method_holder()->class_loader_data();
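
The PrintJNIResolving change above replaces an ad-hoc flag check with unified logging under the jni+resolve tags, so the output is now enabled with -Xlog:jni+resolve=debug instead of -XX:+PrintJNIResolving. When the message arguments are expensive to compute, the usual guard applies; a hedged sketch of that idiom (message text as in the patch):

    // Sketch: guard costly argument construction behind the tag-set check.
    if (log_is_enabled(Debug, jni, resolve)) {
      log_debug(jni, resolve)("[Dynamic-linking native method %s.%s ... JNI]",
                              method->method_holder()->external_name(),
                              method->name()->as_C_string());
    }
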
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -228,7 +228,6 @@
   do_bool_flag(UseCompressedOops)                                          \
   X86_ONLY(do_bool_flag(UseCountLeadingZerosInstruction))                  \
   X86_ONLY(do_bool_flag(UseCountTrailingZerosInstruction))                 \
-  do_bool_flag(UseConcMarkSweepGC)                                         \
   do_bool_flag(UseG1GC)                                                    \
   do_bool_flag(UseParallelGC)                                              \
   do_bool_flag(UseParallelOldGC)                                           \
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -952,7 +952,7 @@
 JVMCIObject JVMCIEnv::new_HotSpotNmethod(const methodHandle& method, const char* name, jboolean isDefault, jlong compileId, JVMCI_TRAPS) {
   JavaThread* THREAD = JavaThread::current();
 
-  JVMCIObject methodObject = get_jvmci_method(method(), JVMCI_CHECK_(JVMCIObject()));
+  JVMCIObject methodObject = get_jvmci_method(method, JVMCI_CHECK_(JVMCIObject()));
 
   if (is_hotspot()) {
     InstanceKlass* ik = InstanceKlass::cast(HotSpotJVMCI::HotSpotNmethod::klass());
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -87,7 +87,7 @@
 
 #ifndef PRODUCT
 static void check_resolve_method(const char* call_type, Klass* resolved_klass, Symbol* method_name, Symbol* method_signature, TRAPS) {
-  methodHandle method;
+  Method* method;
   LinkInfo link_info(resolved_klass, method_name, method_signature, NULL, LinkInfo::skip_access_check);
   if (strcmp(call_type, "call_static") == 0) {
     method = LinkResolver::resolve_static_call_or_null(link_info);
@@ -98,7 +98,7 @@
   } else {
     fatal("Unknown or unsupported call type: %s", call_type);
   }
-  if (method.is_null()) {
+  if (method == NULL) {
     fatal("Could not resolve %s.%s%s", resolved_klass->external_name(), method_name->as_C_string(), method_signature->as_C_string());
   }
 }
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1150,16 +1150,16 @@
 // ------------------------------------------------------------------
 // Perform an appropriate method lookup based on accessor, holder,
 // name, signature, and bytecode.
-methodHandle JVMCIRuntime::lookup_method(InstanceKlass* accessor,
-                               Klass*        holder,
-                               Symbol*       name,
-                               Symbol*       sig,
-                               Bytecodes::Code bc,
-                               constantTag   tag) {
+Method* JVMCIRuntime::lookup_method(InstanceKlass* accessor,
+                                    Klass*        holder,
+                                    Symbol*       name,
+                                    Symbol*       sig,
+                                    Bytecodes::Code bc,
+                                    constantTag   tag) {
   // Accessibility checks are performed in JVMCIEnv::get_method_by_index_impl().
   assert(check_klass_accessibility(accessor, holder), "holder not accessible");
 
-  methodHandle dest_method;
+  Method* dest_method;
   LinkInfo link_info(holder, name, sig, accessor, LinkInfo::needs_access_check, tag);
   switch (bc) {
   case Bytecodes::_invokestatic:
@@ -1186,9 +1186,9 @@
 
 
 // ------------------------------------------------------------------
-methodHandle JVMCIRuntime::get_method_by_index_impl(const constantPoolHandle& cpool,
-                                          int index, Bytecodes::Code bc,
-                                          InstanceKlass* accessor) {
+Method* JVMCIRuntime::get_method_by_index_impl(const constantPoolHandle& cpool,
+                                               int index, Bytecodes::Code bc,
+                                               InstanceKlass* accessor) {
   if (bc == Bytecodes::_invokedynamic) {
     ConstantPoolCacheEntry* cpce = cpool->invokedynamic_cp_cache_entry_at(index);
     bool is_resolved = !cpce->is_f1_null();
@@ -1196,7 +1196,7 @@
       // Get the invoker Method* from the constant pool.
       // (The appendix argument, if any, will be noted in the method's signature.)
       Method* adapter = cpce->f1_as_method();
-      return methodHandle(adapter);
+      return adapter;
     }
 
     return NULL;
@@ -1235,8 +1235,8 @@
 
   if (holder_is_accessible) { // Our declared holder is loaded.
     constantTag tag = cpool->tag_ref_at(index);
-    methodHandle m = lookup_method(accessor, holder, name_sym, sig_sym, bc, tag);
-    if (!m.is_null()) {
+    Method* m = lookup_method(accessor, holder, name_sym, sig_sym, bc, tag);
+    if (m != NULL) {
       // We found the method.
       return m;
     }
@@ -1265,7 +1265,7 @@
 
 
 // ------------------------------------------------------------------
-methodHandle JVMCIRuntime::get_method_by_index(const constantPoolHandle& cpool,
+Method* JVMCIRuntime::get_method_by_index(const constantPoolHandle& cpool,
                                      int index, Bytecodes::Code bc,
                                      InstanceKlass* accessor) {
   ResourceMark rm;
--- a/src/hotspot/share/jvmci/jvmciRuntime.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmciRuntime.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -117,18 +117,18 @@
                                           Klass* loading_klass);
   static void   get_field_by_index_impl(InstanceKlass* loading_klass, fieldDescriptor& fd,
                                         int field_index);
-  static methodHandle  get_method_by_index_impl(const constantPoolHandle& cpool,
-                                                int method_index, Bytecodes::Code bc,
-                                                InstanceKlass* loading_klass);
+  static Method*  get_method_by_index_impl(const constantPoolHandle& cpool,
+                                           int method_index, Bytecodes::Code bc,
+                                           InstanceKlass* loading_klass);
 
   // Helper methods
   static bool       check_klass_accessibility(Klass* accessing_klass, Klass* resolved_klass);
-  static methodHandle  lookup_method(InstanceKlass*  accessor,
-                                     Klass*  holder,
-                                     Symbol*         name,
-                                     Symbol*         sig,
-                                     Bytecodes::Code bc,
-                                     constantTag     tag);
+  static Method*    lookup_method(InstanceKlass*  accessor,
+                                  Klass*  holder,
+                                  Symbol*         name,
+                                  Symbol*         sig,
+                                  Bytecodes::Code bc,
+                                  constantTag     tag);
 
  public:
   JVMCIRuntime() {
@@ -194,9 +194,9 @@
                                      Klass* loading_klass);
   static void   get_field_by_index(InstanceKlass* loading_klass, fieldDescriptor& fd,
                                    int field_index);
-  static methodHandle  get_method_by_index(const constantPoolHandle& cpool,
-                                           int method_index, Bytecodes::Code bc,
-                                           InstanceKlass* loading_klass);
+  static Method*  get_method_by_index(const constantPoolHandle& cpool,
+                                      int method_index, Bytecodes::Code bc,
+                                      InstanceKlass* loading_klass);
 
  // converts the Klass* representing the holder of a method into an
   // InstanceKlass*.  This is needed since the holder of a method in
--- a/src/hotspot/share/jvmci/jvmci_globals.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/jvmci/jvmci_globals.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -49,10 +49,14 @@
           "Enable JVMCI")                                                   \
                                                                             \
   experimental(bool, EnableJVMCIProduct, false,                             \
-          "Allow JVMCI to be used in product mode")                         \
+          "Allow JVMCI to be used in product mode. This alters a subset of "\
+          "JVMCI flags to be non-experimental, defaults UseJVMCICompiler "  \
+          "to true and defaults UseJVMCINativeLibrary to true if a JVMCI "  \
+          "native library is available.")                                   \
                                                                             \
   experimental(bool, UseJVMCICompiler, false,                               \
-          "Use JVMCI as the default compiler")                              \
+          "Use JVMCI as the default compiler. Defaults to true if "         \
+          "EnableJVMCIProduct is true.")                                    \
                                                                             \
   experimental(bool, JVMCIPrintProperties, false,                           \
           "Prints properties used by the JVMCI compiler and exits")         \
@@ -117,7 +121,8 @@
   experimental(bool, UseJVMCINativeLibrary, false,                          \
           "Execute JVMCI Java code from a shared library "                  \
           "instead of loading it from class files and executing it "        \
-          "on the HotSpot heap")                                            \
+          "on the HotSpot heap. Defaults to true if EnableJVMCIProduct is " \
+          "true and a JVMCI native library is available.")\
                                                                             \
   NOT_COMPILER2(diagnostic(bool, UseMultiplyToLenIntrinsic, false,          \
           "Enables intrinsification of BigInteger.multiplyToLen()"))        \
--- a/src/hotspot/share/logging/logPrefix.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/logging/logPrefix.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -57,6 +57,7 @@
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, ihop)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ergo, refine)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap)) \
+  LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap, numa)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, heap, region)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, freelist)) \
   LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, humongous)) \
--- a/src/hotspot/share/logging/logTag.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/logging/logTag.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -108,6 +108,7 @@
   LOG_TAG(nestmates) \
   LOG_TAG(nmethod) \
   LOG_TAG(normalize) \
+  LOG_TAG(numa) \
   LOG_TAG(objecttagging) \
   LOG_TAG(obsolete) \
   LOG_TAG(oldobject) \
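
Together with the gc+heap+numa prefix registered in logPrefix.hpp above, the new numa tag makes NUMA-aware heap diagnostics selectable with -Xlog:gc+heap+numa=debug. A minimal sketch of a log site using the new tag set (the message and num_nodes are illustrative, not from this patch):

    // Sketch: a GC-side log call carrying the new tag combination.
    log_debug(gc, heap, numa)("NUMA node count: %u", num_nodes);
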
--- a/src/hotspot/share/memory/allocation.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/allocation.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -283,11 +283,6 @@
     _shared_metaspace_top = top;
   }
 
-  static void expand_shared_metaspace_range(void* top) {
-    assert(top >= _shared_metaspace_top, "must be");
-    _shared_metaspace_top = top;
-  }
-
   static void* shared_metaspace_base() { return _shared_metaspace_base; }
   static void* shared_metaspace_top()  { return _shared_metaspace_top;  }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/archiveUtils.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/archiveUtils.hpp"
+#include "memory/metaspace.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+#if INCLUDE_CDS
+
+CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
+address* ArchivePtrMarker::_ptr_base;
+address* ArchivePtrMarker::_ptr_end;
+bool ArchivePtrMarker::_compacted;
+
+void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end) {
+  assert(_ptrmap == NULL, "initialize only once");
+  _ptr_base = ptr_base;
+  _ptr_end = ptr_end;
+  _compacted = false;
+  _ptrmap = ptrmap;
+
+  // Use this as an initial guesstimate. We should need less space in the
+  // archive, but if we're wrong the bitmap will be expanded automatically.
+  size_t estimated_archive_size = MetaspaceGC::capacity_until_GC();
+  // But set it smaller in debug builds so we always test the expansion code.
+  // (The default archive is about 12MB.)
+  DEBUG_ONLY(estimated_archive_size = 6 * M);
+
+  // We need one bit per pointer in the archive.
+  _ptrmap->initialize(estimated_archive_size / sizeof(intptr_t));
+}
+
+void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
+  assert(_ptrmap != NULL, "not initialized");
+  assert(!_compacted, "cannot mark anymore");
+
+  if (_ptr_base <= ptr_loc && ptr_loc < _ptr_end) {
+    address value = *ptr_loc;
+    if (value != NULL) {
+      assert(uintx(ptr_loc) % sizeof(intptr_t) == 0, "pointers must be stored in aligned addresses");
+      size_t idx = ptr_loc - _ptr_base;
+      if (_ptrmap->size() <= idx) {
+        _ptrmap->resize((idx + 1) * 2);
+      }
+      assert(idx < _ptrmap->size(), "must be");
+      _ptrmap->set_bit(idx);
+      //tty->print_cr("Marking pointer [%p] -> %p @ " SIZE_FORMAT_W(9), ptr_loc, *ptr_loc, idx);
+    }
+  }
+}
+
+class ArchivePtrBitmapCleaner: public BitMapClosure {
+  CHeapBitMap* _ptrmap;
+  address* _ptr_base;
+  address  _relocatable_base;
+  address  _relocatable_end;
+  size_t   _max_non_null_offset;
+
+public:
+  ArchivePtrBitmapCleaner(CHeapBitMap* ptrmap, address* ptr_base, address relocatable_base, address relocatable_end) :
+    _ptrmap(ptrmap), _ptr_base(ptr_base),
+    _relocatable_base(relocatable_base), _relocatable_end(relocatable_end), _max_non_null_offset(0) {}
+
+  bool do_bit(size_t offset) {
+    address* ptr_loc = _ptr_base + offset;
+    address  ptr_value = *ptr_loc;
+    if (ptr_value != NULL) {
+      assert(_relocatable_base <= ptr_value && ptr_value < _relocatable_end, "do not point to arbitrary locations!");
+      if (_max_non_null_offset < offset) {
+        _max_non_null_offset = offset;
+      }
+    } else {
+      _ptrmap->clear_bit(offset);
+      DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT  "] -> NULL @ " SIZE_FORMAT_W(9), p2i(ptr_loc), offset));
+    }
+
+    return true;
+  }
+
+  size_t max_non_null_offset() const { return _max_non_null_offset; }
+};
+
+void ArchivePtrMarker::compact(address relocatable_base, address relocatable_end) {
+  assert(!_compacted, "cannot compact again");
+  ArchivePtrBitmapCleaner cleaner(_ptrmap, _ptr_base, relocatable_base, relocatable_end);
+  _ptrmap->iterate(&cleaner);
+  compact(cleaner.max_non_null_offset());
+}
+
+void ArchivePtrMarker::compact(size_t max_non_null_offset) {
+  assert(!_compacted, "cannot compact again");
+  _ptrmap->resize(max_non_null_offset + 1);
+  _compacted = true;
+}
+
+#endif // INCLUDE_CDS
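
To make the marking contract concrete: during dumping, every pointer slot written into [_ptr_base, _ptr_end) is registered at the moment it is populated, and mark_pointer() silently ignores locations outside that range or slots holding NULL. A hedged sketch of a dump-time call site, reusing the example from the header comment below (k stands for an InstanceKlass being copied into the archive):

    // Sketch: record the location of an embedded pointer while dumping.
    ArchivePtrMarker::mark_pointer(&k->_name);   // marks the slot &k->_name, not the Symbol it points to
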
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/archiveUtils.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_ARCHIVEUTILS_HPP
+#define SHARE_MEMORY_ARCHIVEUTILS_HPP
+
+#include "logging/log.hpp"
+#include "runtime/arguments.hpp"
+#include "utilities/bitMap.hpp"
+
+// ArchivePtrMarker is used to mark the location of pointers embedded in a CDS archive. E.g., when an
+// InstanceKlass k is dumped, we mark the location of the k->_name pointer by effectively calling
+// mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_ptr_base <= ptr_loc < _ptr_end). _ptr_base is
+// fixed, but _ptr_end can be expanded as more objects are dumped.
+class ArchivePtrMarker : AllStatic {
+  static CHeapBitMap* _ptrmap;
+  static address*     _ptr_base;
+  static address*     _ptr_end;
+
+  // Once _ptrmap is compacted, we don't allow bit marking anymore. This is to
+  // avoid unintentional copy operations after the bitmap has been finalized and written.
+  static bool         _compacted;
+public:
+  static void initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end);
+  static void mark_pointer(address* ptr_loc);
+  static void compact(address relocatable_base, address relocatable_end);
+  static void compact(size_t max_non_null_offset);
+
+  template <typename T>
+  static void mark_pointer(T* ptr_loc) {
+    mark_pointer((address*)ptr_loc);
+  }
+
+  static void expand_ptr_end(address *new_ptr_end) {
+    assert(_ptr_end <= new_ptr_end, "must be");
+    _ptr_end = new_ptr_end;
+  }
+
+  static CHeapBitMap* ptrmap() {
+    return _ptrmap;
+  }
+};
+
+// SharedDataRelocator is used to shift pointers in the CDS archive.
+//
+// The CDS archive is basically a contiguous block of memory (divided into several regions)
+// that contains multiple objects. The objects may contain direct pointers that point to other objects
+// within the archive (e.g., InstanceKlass::_name points to a Symbol in the archive). During dumping, we
+// build a bitmap that marks the locations of all these pointers (using ArchivePtrMarker, see comments above).
+//
+// The contents of the archive assume that it's mapped at the default SharedBaseAddress (e.g. 0x800000000).
+// If the archive ends up being mapped at a different address (e.g. 0x810000000), SharedDataRelocator
+// is used to shift each marked pointer by a delta (0x10000000 in this example), so that it points to
+// the actually mapped location of the target object.
+template <bool COMPACTING>
+class SharedDataRelocator: public BitMapClosure {
+  // for all (address** p), where (is_marked(p) && _patch_base <= p && p < _patch_end) { *p += delta; }
+
+  // Patch all pointers within this region that are marked.
+  address* _patch_base;
+  address* _patch_end;
+
+  // Before patching, all pointers must point to this region.
+  address _valid_old_base;
+  address _valid_old_end;
+
+  // After patching, all pointers must point to this region.
+  address _valid_new_base;
+  address _valid_new_end;
+
+  // How much to relocate for each pointer.
+  intx _delta;
+
+  // The following fields are used only when COMPACTING == true.
+  // The highest offset (inclusive) in the bitmap that contains a non-null pointer.
+  // This is used at dump time to reduce the size of the bitmap (which may have been over-allocated).
+  size_t _max_non_null_offset;
+  CHeapBitMap* _ptrmap;
+
+ public:
+  SharedDataRelocator(address* patch_base, address* patch_end,
+                      address valid_old_base, address valid_old_end,
+                      address valid_new_base, address valid_new_end, intx delta,
+                      CHeapBitMap* ptrmap = NULL) :
+    _patch_base(patch_base), _patch_end(patch_end),
+    _valid_old_base(valid_old_base), _valid_old_end(valid_old_end),
+    _valid_new_base(valid_new_base), _valid_new_end(valid_new_end),
+    _delta(delta) {
+    log_debug(cds, reloc)("SharedDataRelocator::_patch_base     = " PTR_FORMAT, p2i(_patch_base));
+    log_debug(cds, reloc)("SharedDataRelocator::_patch_end      = " PTR_FORMAT, p2i(_patch_end));
+    log_debug(cds, reloc)("SharedDataRelocator::_valid_old_base = " PTR_FORMAT, p2i(_valid_old_base));
+    log_debug(cds, reloc)("SharedDataRelocator::_valid_old_end  = " PTR_FORMAT, p2i(_valid_old_end));
+    log_debug(cds, reloc)("SharedDataRelocator::_valid_new_base = " PTR_FORMAT, p2i(_valid_new_base));
+    log_debug(cds, reloc)("SharedDataRelocator::_valid_new_end  = " PTR_FORMAT, p2i(_valid_new_end));
+    if (COMPACTING) {
+      assert(ptrmap != NULL, "must be");
+      _max_non_null_offset = 0;
+      _ptrmap = ptrmap;
+    } else {
+      // Don't touch the _max_non_null_offset and _ptrmap fields. Hopefully a good C++ compiler can
+      // elide them.
+      assert(ptrmap == NULL, "must be");
+    }
+  }
+
+  size_t max_non_null_offset() {
+    assert(COMPACTING, "must be");
+    return _max_non_null_offset;
+  }
+
+  inline bool do_bit(size_t offset);
+};
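+
+// A sketch of how the relocator is typically driven (see the call sites in
+// dynamicArchive.cpp and filemap.cpp; 'ptrmap' marks pointer slots relative to patch_base):
+//
+//   SharedDataRelocator<false> patcher((address*)patch_base, (address*)patch_end,
+//                                      valid_old_base, valid_old_end,
+//                                      valid_new_base, valid_new_end, delta);
+//   ptrmap.iterate(&patcher);   // invokes patcher.do_bit(offset) for every marked slot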
+
+
+#endif // SHARE_MEMORY_ARCHIVEUTILS_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/memory/archiveUtils.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP
+#define SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP
+
+#include "memory/archiveUtils.hpp"
+#include "utilities/bitMap.inline.hpp"
+
+template <bool COMPACTING>
+inline bool SharedDataRelocator<COMPACTING>::do_bit(size_t offset) {
+  address* p = _patch_base + offset;
+  assert(_patch_base <= p && p < _patch_end, "must be");
+
+  address old_ptr = *p;
+  assert(_valid_old_base <= old_ptr && old_ptr < _valid_old_end, "must be");
+
+  if (COMPACTING) {
+    // Start-up performance: use a template parameter to elide this block for run-time archive
+    // relocation.
+    assert(Arguments::is_dumping_archive(), "Don't do this during run-time archive loading!");
+    if (old_ptr == NULL) {
+      _ptrmap->clear_bit(offset);
+      DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT  "] -> NULL @ " SIZE_FORMAT_W(9), p2i(p), offset));
+      return true;
+    } else {
+      _max_non_null_offset = offset;
+    }
+  } else {
+    assert(old_ptr != NULL, "bits for NULL pointers should have been cleaned at dump time");
+  }
+
+  address new_ptr = old_ptr + _delta;
+  assert(_valid_new_base <= new_ptr && new_ptr < _valid_new_end, "must be");
+
+  DEBUG_ONLY(log_trace(cds, reloc)("Patch2: @%8d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT,
+                                   (int)offset, p2i(p), p2i(old_ptr), p2i(new_ptr)));
+  *p = new_ptr;
+  return true; // keep iterating
+}
+
+#endif // SHARE_MEMORY_ARCHIVEUTILS_INLINE_HPP
--- a/src/hotspot/share/memory/dynamicArchive.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/dynamicArchive.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -29,12 +29,13 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "logging/log.hpp"
+#include "memory/archiveUtils.inline.hpp"
+#include "memory/dynamicArchive.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
-#include "memory/dynamicArchive.hpp"
 #include "oops/compressedOops.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
@@ -50,7 +51,6 @@
 #endif
 
 class DynamicArchiveBuilder : ResourceObj {
-  CHeapBitMap _ptrmap;
   static unsigned my_hash(const address& a) {
     return primitive_hash<address>(a);
   }
@@ -64,7 +64,7 @@
       16384, ResourceObj::C_HEAP> RelocationTable;
   RelocationTable _new_loc_table;
 
-  intx _buffer_to_target_delta;
+  static intx _buffer_to_target_delta;
 
   DumpRegion* _current_dump_space;
 
@@ -77,10 +77,7 @@
 
 public:
   void mark_pointer(address* ptr_loc) {
-    if (is_in_buffer_space(ptr_loc)) {
-      size_t idx = pointer_delta(ptr_loc, _alloc_bottom, sizeof(address));
-      _ptrmap.set_bit(idx);
-    }
+    ArchivePtrMarker::mark_pointer(ptr_loc);
   }
 
   DumpRegion* current_dump_space() const {
@@ -128,6 +125,28 @@
     return pp != NULL;
   }
 
+  static int dynamic_dump_method_comparator(Method* a, Method* b) {
+    Symbol* a_name = a->name();
+    Symbol* b_name = b->name();
+
+    if (a_name == b_name) {
+      return 0;
+    }
+
+    if (!MetaspaceShared::is_in_shared_metaspace(a_name)) {
+      // a_name points to a Symbol in the top archive.
+      // When this method is called, a_name is still pointing to the output space.
+      // Translate it to point to the target space, so that it can be compared with
+      // Symbols in the base archive.
+      a_name = (Symbol*)(address(a_name) + _buffer_to_target_delta);
+    }
+    if (!MetaspaceShared::is_in_shared_metaspace(b_name)) {
+      b_name = (Symbol*)(address(b_name) + _buffer_to_target_delta);
+    }
+
+    return a_name->fast_compare(b_name);
+  }
+
 protected:
   enum FollowMode {
     make_a_copy, point_to_it, set_to_null
@@ -240,6 +259,16 @@
 
       return true; // keep recursing until every object is visited exactly once.
     }
+
+    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
+      assert(type == _method_entry_ref, "only special type allowed for now");
+      address obj = ref->obj();
+      address new_obj = _builder->get_new_loc(ref);
+      size_t offset = pointer_delta(p, obj,  sizeof(u1));
+      intptr_t* new_p = (intptr_t*)(new_obj + offset);
+      assert(*p == *new_p, "must be a copy");
+      ArchivePtrMarker::mark_pointer((address*)new_p);
+    }
   };
 
   class EmbeddedRefUpdater: public MetaspaceClosure {
@@ -331,7 +360,7 @@
   public:
     EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {}
     virtual bool do_ref(Ref* ref, bool read_only) {
-      if (ref->not_null() && _builder->is_in_buffer_space(ref->obj())) {
+      if (ref->not_null()) {
         _builder->mark_pointer(ref->addr());
       }
       return false; // Do not recurse.
@@ -441,10 +470,10 @@
                             p2i(obj), p2i(p), bytes,
                             MetaspaceObj::type_name(ref->msotype()));
     memcpy(p, obj, bytes);
-
     intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), p);
     if (cloned_vtable != NULL) {
       update_pointer((address*)p, (address)cloned_vtable, "vtb", 0, /*is_mso_pointer*/false);
+      mark_pointer((address*)p);
     }
 
     return (address)p;
@@ -551,6 +580,9 @@
     address reserved_bottom = reserve_space_and_init_buffer_to_target_delta();
     init_header(reserved_bottom);
 
+    CHeapBitMap ptrmap;
+    ArchivePtrMarker::initialize(&ptrmap, (address*)reserved_bottom, (address*)current_dump_space()->top());
+
     verify_estimate_size(sizeof(DynamicArchiveHeader), "header");
 
     log_info(cds, dynamic)("Copying %d klasses and %d symbols",
@@ -576,10 +608,6 @@
       iterate_roots(&ro_copier);
     }
 
-    size_t bitmap_size = pointer_delta(current_dump_space()->top(),
-                                       _alloc_bottom, sizeof(address));
-    _ptrmap.initialize(bitmap_size);
-
     {
       log_info(cds)("Relocating embedded pointers ... ");
       ResourceMark rm;
@@ -653,7 +681,7 @@
       it->push(&_symbols->at(i));
     }
 
-    _header->shared_path_table_metaspace_pointers_do(it);
+    FileMapInfo::metaspace_pointers_do(it);
 
     // Do not call these again, as we have already collected all the classes and symbols
     // that we want to archive. Also, these calls would corrupt the tables when
@@ -666,6 +694,9 @@
   }
 };
 
+intx DynamicArchiveBuilder::_buffer_to_target_delta;
+
+
 size_t DynamicArchiveBuilder::estimate_archive_size() {
   // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
   _estimated_hashtable_bytes = 0;
@@ -688,26 +719,16 @@
 
 address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
   size_t total = estimate_archive_size();
-  bool large_pages = false; // No large pages when dumping the CDS archive.
-  size_t increment = align_up(1*G, reserve_alignment());
-  char* addr = (char*)align_up(CompressedKlassPointers::base() + MetaspaceSize + increment,
-                               reserve_alignment());
-
-  ReservedSpace* rs = MetaspaceShared::reserve_shared_rs(
-                          total, reserve_alignment(), large_pages, addr);
-  while (!rs->is_reserved() && (addr + increment > addr)) {
-    addr += increment;
-    rs = MetaspaceShared::reserve_shared_rs(
-           total, reserve_alignment(), large_pages, addr);
-  }
-  if (!rs->is_reserved()) {
+  ReservedSpace rs = MetaspaceShared::reserve_shared_space(total);
+  if (!rs.is_reserved()) {
     log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
     vm_direct_exit(0);
   }
 
-  address buffer_base = (address)rs->base();
+  address buffer_base = (address)rs.base();
   log_info(cds, dynamic)("Reserved output buffer space at    : " PTR_FORMAT " [%d bytes]",
                          p2i(buffer_base), (int)total);
+  MetaspaceShared::set_shared_rs(rs);
 
   // At run time, we will mmap the dynamic archive at target_space_bottom.
   // However, at dump time, we may not be able to write into the target_space,
@@ -788,6 +809,7 @@
 void DynamicArchiveBuilder::make_klasses_shareable() {
   int i, count = _klasses->length();
 
+  InstanceKlass::disable_method_binary_search();
   for (i = 0; i < count; i++) {
     InstanceKlass* ik = _klasses->at(i);
     sort_methods(ik);
@@ -806,7 +828,7 @@
       ik->set_class_loader_type(ClassLoader::APP_LOADER);
     }
 
-    MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik);
+    MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread::current(), ik);
     ik->remove_unshareable_info();
 
     assert(ik->array_klasses() == NULL, "sanity");
@@ -847,18 +869,24 @@
   }
 
 #ifdef ASSERT
-  {
+  if (ik->methods() != NULL) {
     for (int m = 0; m < ik->methods()->length(); m++) {
       Symbol* name = ik->methods()->at(m)->name();
       assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
     }
   }
+  if (ik->default_methods() != NULL) {
+    for (int m = 0; m < ik->default_methods()->length(); m++) {
+      Symbol* name = ik->default_methods()->at(m)->name();
+      assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be");
+    }
+  }
 #endif
 
   Thread* THREAD = Thread::current();
-  Method::sort_methods(ik->methods());
+  Method::sort_methods(ik->methods(), /*set_idnums=*/true, dynamic_dump_method_comparator);
   if (ik->default_methods() != NULL) {
-    Method::sort_methods(ik->default_methods(), /*set_idnums=*/false);
+    Method::sort_methods(ik->default_methods(), /*set_idnums=*/false, dynamic_dump_method_comparator);
   }
   ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
   ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail");
@@ -902,14 +930,60 @@
   }
 };
 
-
 void DynamicArchiveBuilder::relocate_buffer_to_target() {
   RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta);
-  _ptrmap.iterate(&patcher);
+  ArchivePtrMarker::ptrmap()->iterate(&patcher);
+
+  Array<u8>* table = FileMapInfo::shared_path_table().table();
+  SharedPathTable runtime_table(to_target(table), FileMapInfo::shared_path_table().size());
+  _header->set_shared_path_table(runtime_table);
+
+  address relocatable_base = (address)SharedBaseAddress;
+  address relocatable_end = (address)(current_dump_space()->top()) + _buffer_to_target_delta;
+
+  intx addr_delta = MetaspaceShared::final_delta();
+  if (addr_delta == 0) {
+    ArchivePtrMarker::compact(relocatable_base, relocatable_end);
+  } else {
+    // The base archive is NOT mapped at Arguments::default_SharedBaseAddress() (due to ASLR).
+    // This means that the current content of the dynamic archive is based on a random
+    // address. Let's relocate all the pointers, so that it can be mapped to
+    // Arguments::default_SharedBaseAddress() without runtime relocation.
+    //
+    // Note: both the base and dynamic archive are written with
+    // FileMapHeader::_shared_base_address == Arguments::default_SharedBaseAddress()
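+    //
+    // For example (illustrative addresses): if the base archive is currently mapped at
+    // 0x810000000 while Arguments::default_SharedBaseAddress() is 0x800000000, then
+    // final_delta() is -0x10000000 and every marked pointer is shifted down by that
+    // amount, so the written archive can later be mapped at the default address
+    // without any runtime relocation.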
+
+    // Patch all pointers that are marked by ptrmap within this region,
+    // where we have just dumped all the metaspace data.
+    address patch_base = (address)_alloc_bottom;
+    address patch_end  = (address)current_dump_space()->top();
 
-  Array<u8>* table = _header->shared_path_table().table();
-  table = to_target(table);
- _header->relocate_shared_path_table(table);
+    // the current value of each pointer to be patched must be within this
+    // range (i.e., it must point either into the base archive (as currently mapped)
+    // or to the (target address of the) top archive)
+    address valid_old_base = relocatable_base;
+    address valid_old_end  = relocatable_end;
+    size_t base_plus_top_size = valid_old_end - valid_old_base;
+    size_t top_size = patch_end - patch_base;
+    size_t base_size = base_plus_top_size - top_size;
+    assert(base_plus_top_size > base_size, "no overflow");
+    assert(base_plus_top_size > top_size, "no overflow");
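+    // base_plus_top_size spans [base archive][top archive], so base_size is the
+    // size of the (already mapped) base archive alone.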
+
+    // after patching, the pointers must point inside this range
+    // (the requested location of the archive, as mapped at runtime).
+    address valid_new_base = (address)Arguments::default_SharedBaseAddress();
+    address valid_new_end  = valid_new_base + base_plus_top_size;
+
+    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
+                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT "], delta = " INTX_FORMAT " bytes",
+                   p2i(patch_base + base_size), p2i(patch_end),
+                   p2i(valid_new_base + base_size), p2i(valid_new_end), addr_delta);
+
+    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+                                      valid_new_base, valid_new_end, addr_delta, ArchivePtrMarker::ptrmap());
+    ArchivePtrMarker::ptrmap()->iterate(&patcher);
+    ArchivePtrMarker::compact(patcher.max_non_null_offset());
+  }
 }
 
 void DynamicArchiveBuilder::write_regions(FileMapInfo* dynamic_info) {
@@ -925,6 +999,7 @@
                              MetaspaceShared::misc_code_dump_space()->base(),
                              MetaspaceShared::misc_code_dump_space()->used(),
                              /*read_only=*/false,/*allow_exec=*/true);
+  dynamic_info->write_bitmap_region(ArchivePtrMarker::ptrmap());
 }
 
 void DynamicArchiveBuilder::write_archive(char* serialized_data_start) {
@@ -940,6 +1015,7 @@
   const char* archive_name = Arguments::GetSharedDynamicArchivePath();
   dynamic_info->open_for_write(archive_name);
   write_regions(dynamic_info);
+  dynamic_info->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
   dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
   dynamic_info->write_header();
   dynamic_info->close();
@@ -948,6 +1024,8 @@
   address top  = address(current_dump_space()->top()) + _buffer_to_target_delta;
   size_t file_size = pointer_delta(top, base, sizeof(char));
 
+  base += MetaspaceShared::final_delta();
+  top += MetaspaceShared::final_delta();
   log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT
                          " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
                          p2i(base), p2i(top), _header->header_size(), file_size);
@@ -1036,79 +1114,8 @@
 }
 
 
-static DynamicArchiveHeader *_dynamic_header = NULL;
 DynamicArchiveBuilder* DynamicArchive::_builder = NULL;
 
-void DynamicArchive::map_failed(FileMapInfo* mapinfo) {
-  if (mapinfo->dynamic_header() != NULL) {
-    os::free((void*)mapinfo->dynamic_header());
-  }
-  delete mapinfo;
-}
-
-// Returns the top of the mapped address space
-address DynamicArchive::map() {
-  assert(UseSharedSpaces, "Sanity");
-
-  // Create the dynamic archive map info
-  FileMapInfo* mapinfo;
-  const char* filename = Arguments::GetSharedDynamicArchivePath();
-  struct stat st;
-  address result;
-  if ((filename != NULL) && (os::stat(filename, &st) == 0)) {
-    mapinfo = new FileMapInfo(false);
-    if (!mapinfo->open_for_read(filename)) {
-      result = NULL;
-    }
-    result = map_impl(mapinfo);
-    if (result == NULL) {
-      map_failed(mapinfo);
-      mapinfo->restore_shared_path_table();
-    }
-  } else {
-    if (filename != NULL) {
-      log_warning(cds, dynamic)("specified dynamic archive doesn't exist: %s", filename);
-    }
-    result = NULL;
-  }
-  return result;
-}
-
-address DynamicArchive::map_impl(FileMapInfo* mapinfo) {
-  // Read header
-  if (!mapinfo->initialize(false)) {
-    return NULL;
-  }
-
-  _dynamic_header = mapinfo->dynamic_header();
-  int regions[] = {MetaspaceShared::rw,
-                   MetaspaceShared::ro,
-                   MetaspaceShared::mc};
-
-  size_t len = sizeof(regions)/sizeof(int);
-  char* saved_base[] = {NULL, NULL, NULL};
-  char* top = mapinfo->map_regions(regions, saved_base, len);
-  if (top == NULL) {
-    mapinfo->unmap_regions(regions, saved_base, len);
-    FileMapInfo::fail_continue("Unable to use dynamic archive. Failed map_region for using -Xshare:on.");
-    return NULL;
-  }
-
-  if (!validate(mapinfo)) {
-    return NULL;
-  }
-
-  if (_dynamic_header == NULL) {
-    return NULL;
-  }
-
-  intptr_t* buffer = (intptr_t*)_dynamic_header->serialized_data_start();
-  ReadClosure rc(&buffer);
-  SymbolTable::serialize_shared_table_header(&rc, false);
-  SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
-
-  return (address)top;
-}
 
 bool DynamicArchive::validate(FileMapInfo* dynamic_info) {
   // Check if the recorded base archive matches with the current one
@@ -1136,11 +1143,3 @@
   }
   return true;
 }
-
-bool DynamicArchive::is_mapped() {
-  return (_dynamic_header != NULL);
-}
-
-void DynamicArchive::disable() {
-  _dynamic_header = NULL;
-}
--- a/src/hotspot/share/memory/dynamicArchive.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/dynamicArchive.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -99,13 +99,8 @@
   // archive?
   static bool is_in_target_space(void *obj);
 
-  static address map();
-  static bool is_mapped();
+  static bool is_mapped() { return FileMapInfo::dynamic_info() != NULL; }
   static bool validate(FileMapInfo* dynamic_info);
-  static void disable();
-private:
-  static address map_impl(FileMapInfo* mapinfo);
-  static void map_failed(FileMapInfo* mapinfo);
 };
 #endif // INCLUDE_CDS
 #endif // SHARE_VM_MEMORY_DYNAMICARCHIVE_HPP
--- a/src/hotspot/share/memory/filemap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/filemap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -34,6 +34,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/archiveUtils.inline.hpp"
 #include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.inline.hpp"
@@ -55,6 +56,7 @@
 #include "runtime/vm_version.hpp"
 #include "services/memTracker.hpp"
 #include "utilities/align.hpp"
+#include "utilities/bitMap.inline.hpp"
 #include "utilities/classpathStream.hpp"
 #include "utilities/defaultStream.hpp"
 #if INCLUDE_G1GC
@@ -69,9 +71,6 @@
 #define O_BINARY 0     // otherwise do nothing.
 #endif
 
-extern address JVM_FunctionAtStart();
-extern address JVM_FunctionAtEnd();
-
 // Complain and stop. All error conditions occurring during the writing of
 // an archive file should stop the process.  Unrecoverable errors during
 // the reading of the archive file should stop the process.
@@ -104,12 +103,6 @@
 void FileMapInfo::fail_continue(const char *msg, ...) {
   va_list ap;
   va_start(ap, msg);
-  if (_dynamic_archive_info == NULL) {
-    MetaspaceShared::set_archive_loading_failed();
-  } else {
-    // _dynamic_archive_info has been setup after mapping the base archive
-    DynamicArchive::disable();
-  }
   if (PrintSharedArchiveAndExit && _validating_shared_path_table) {
     // If we are doing PrintSharedArchiveAndExit and some of the classpath entries
     // do not validate, we can still continue "limping" to validate the remaining
@@ -128,15 +121,6 @@
         ls.vprint_cr(msg, ap);
       }
     }
-    if (_dynamic_archive_info == NULL) {
-      UseSharedSpaces = false;
-      assert(current_info() != NULL, "singleton must be registered");
-      current_info()->close();
-    } else {
-      // We are failing when loading the top archive, but the base archive should
-      // continue to work.
-      log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s", _dynamic_archive_info->_full_path);
-    }
   }
   va_end(ap);
 }
@@ -227,9 +211,7 @@
   _narrow_oop_base = CompressedOops::base();
   _narrow_oop_shift = CompressedOops::shift();
   _max_heap_size = MaxHeapSize;
-  _narrow_klass_base = CompressedKlassPointers::base();
   _narrow_klass_shift = CompressedKlassPointers::shift();
-  _shared_path_table = mapinfo->_shared_path_table;
   if (HeapShared::is_heap_object_archiving_allowed()) {
     _heap_end = CompressedOops::end();
   }
@@ -249,11 +231,16 @@
   _verify_local = BytecodeVerificationLocal;
   _verify_remote = BytecodeVerificationRemote;
   _has_platform_or_app_classes = ClassLoaderExt::has_platform_or_app_classes();
-  _shared_base_address = SharedBaseAddress;
+  _requested_base_address = (char*)SharedBaseAddress;
+  _mapped_base_address = (char*)SharedBaseAddress;
   _allow_archiving_with_java_agent = AllowArchivingWithJavaAgent;
   // the following 2 fields will be set in write_header for dynamic archive header
   _base_archive_name_size = 0;
   _base_archive_is_default = false;
+
+  if (!DynamicDumpSharedSpaces) {
+    set_shared_path_table(mapinfo->_shared_path_table);
+  }
 }
 
 void SharedClassPathEntry::init_as_non_existent(const char* path, TRAPS) {
@@ -615,9 +602,11 @@
   return path_array;
 }
 
-bool FileMapInfo::fail(const char* msg, const char* name) {
+bool FileMapInfo::classpath_failure(const char* msg, const char* name) {
   ClassLoader::trace_class_path(msg, name);
-  MetaspaceShared::set_archive_loading_failed();
+  if (PrintSharedArchiveAndExit) {
+    MetaspaceShared::set_archive_loading_failed();
+  }
   return false;
 }
 
@@ -692,7 +681,7 @@
 
   if (mismatch) {
     // The paths are different
-    return fail("[BOOT classpath mismatch, actual =", runtime_boot_path);
+    return classpath_failure("[BOOT classpath mismatch, actual =", runtime_boot_path);
   }
   return true;
 }
@@ -703,7 +692,7 @@
   int rp_len = num_paths(appcp);
   bool mismatch = false;
   if (rp_len < shared_app_paths_len) {
-    return fail("Run time APP classpath is shorter than the one at dump time: ", appcp);
+    return classpath_failure("Run time APP classpath is shorter than the one at dump time: ", appcp);
   }
   if (shared_app_paths_len != 0 && rp_len != 0) {
     // Prefix is OK: E.g., dump with -cp foo.jar, but run with -cp foo.jar:bar.jar.
@@ -711,7 +700,7 @@
     GrowableArray<const char*>* rp_array = create_path_array(appcp);
     if (rp_array->length() == 0) {
       // None of the jar file specified in the runtime -cp exists.
-      return fail("None of the jar file specified in the runtime -cp exists: -Djava.class.path=", appcp);
+      return classpath_failure("None of the jar file specified in the runtime -cp exists: -Djava.class.path=", appcp);
     }
 
     // Handling of non-existent entries in the classpath: we eliminate all the non-existent
@@ -726,7 +715,7 @@
     int j = header()->app_class_paths_start_index();
     mismatch = check_paths(j, shared_app_paths_len, rp_array);
     if (mismatch) {
-      return fail("[APP classpath mismatch, actual: -Djava.class.path=", appcp);
+      return classpath_failure("[APP classpath mismatch, actual: -Djava.class.path=", appcp);
     }
   }
   return true;
@@ -952,8 +941,8 @@
 
 // Read the FileMapInfo information from the file.
 
-bool FileMapInfo::init_from_file(int fd, bool is_static) {
-  size_t sz = is_static ? sizeof(FileMapHeader) : sizeof(DynamicArchiveHeader);
+bool FileMapInfo::init_from_file(int fd) {
+  size_t sz = is_static() ? sizeof(FileMapHeader) : sizeof(DynamicArchiveHeader);
   size_t n = os::read(fd, header(), (unsigned int)sz);
   if (n != sz) {
     fail_continue("Unable to read the file header.");
@@ -965,7 +954,7 @@
     return false;
   }
 
-  unsigned int expected_magic = is_static ? CDS_ARCHIVE_MAGIC : CDS_DYNAMIC_ARCHIVE_MAGIC;
+  unsigned int expected_magic = is_static() ? CDS_ARCHIVE_MAGIC : CDS_DYNAMIC_ARCHIVE_MAGIC;
   if (header()->magic() != expected_magic) {
     log_info(cds)("_magic expected: 0x%08x", expected_magic);
     log_info(cds)("         actual: 0x%08x", header()->magic());
@@ -1016,7 +1005,7 @@
 
   _file_offset = n + header()->base_archive_name_size(); // accounts for the size of _base_archive_name
 
-  if (is_static) {
+  if (is_static()) {
     // just checking the last region is sufficient since the archive is written
     // in sequential order
     size_t len = lseek(fd, 0, SEEK_END);
@@ -1026,8 +1015,6 @@
       fail_continue("The shared archive file has been truncated.");
       return false;
     }
-
-    SharedBaseAddress = header()->shared_base_address();
   }
 
   return true;
@@ -1040,23 +1027,27 @@
 }
 
 // Read the FileMapInfo information from the file.
-bool FileMapInfo::open_for_read(const char* path) {
+bool FileMapInfo::open_for_read() {
   if (_file_open) {
     return true;
   }
-  if (path == NULL) {
+  if (is_static()) {
     _full_path = Arguments::GetSharedArchivePath();
   } else {
-    _full_path = path;
+    _full_path = Arguments::GetSharedDynamicArchivePath();
   }
   int fd = os::open(_full_path, O_RDONLY | O_BINARY, 0);
   if (fd < 0) {
-    if (errno == ENOENT) {
-      // Not locating the shared archive is ok.
-      fail_continue("Specified shared archive not found (%s).", _full_path);
+    if (is_static()) {
+      if (errno == ENOENT) {
+        // Not locating the shared archive is ok.
+        fail_continue("Specified shared archive not found (%s).", _full_path);
+      } else {
+        fail_continue("Failed to open shared archive file (%s).",
+                      os::strerror(errno));
+      }
     } else {
-      fail_continue("Failed to open shared archive file (%s).",
-                    os::strerror(errno));
+      log_warning(cds, dynamic)("specified dynamic archive doesn't exist: %s", _full_path);
     }
     return false;
   }
@@ -1127,25 +1118,35 @@
   }
 }
 
-void FileMapRegion::init(bool is_heap_region, char* base, size_t size, bool read_only,
+size_t FileMapRegion::used_aligned() const {
+  return align_up(used(), os::vm_allocation_granularity());
+}
+
+void FileMapRegion::init(int region_index, char* base, size_t size, bool read_only,
                          bool allow_exec, int crc) {
-  _is_heap_region = is_heap_region;
+  _is_heap_region = HeapShared::is_heap_region(region_index);
+  _is_bitmap_region = (region_index == MetaspaceShared::bm);
+  _mapping_offset = 0;
 
-  if (is_heap_region) {
+  if (_is_heap_region) {
     assert(!DynamicDumpSharedSpaces, "must be");
     assert((base - (char*)CompressedKlassPointers::base()) % HeapWordSize == 0, "Sanity");
     if (base != NULL) {
-      _addr._offset = (intx)CompressedOops::encode_not_null((oop)base);
-    } else {
-      _addr._offset = 0;
+      _mapping_offset = (size_t)CompressedOops::encode_not_null((oop)base);
+      assert(_mapping_offset >> 32 == 0, "must be 32-bit only");
     }
   } else {
-    _addr._base = base;
+    if (base != NULL) {
+      assert(base >= (char*)SharedBaseAddress, "must be");
+      _mapping_offset = base - (char*)SharedBaseAddress;
+    }
   }
   _used = size;
   _read_only = read_only;
   _allow_exec = allow_exec;
   _crc = crc;
+  _mapped_from_file = false;
+  _mapped_base = NULL;
 }
 
 void FileMapInfo::write_region(int region, char* base, size_t size,
@@ -1153,25 +1154,47 @@
   Arguments::assert_is_dumping_archive();
 
   FileMapRegion* si = space_at(region);
-  char* target_base = base;
-  if (DynamicDumpSharedSpaces) {
-    assert(!HeapShared::is_heap_region(region), "dynamic archive doesn't support heap regions");
-    target_base = DynamicArchive::buffer_to_target(base);
+  char* target_base;
+
+  if (region == MetaspaceShared::bm) {
+    target_base = NULL; // always NULL for bm region.
+  } else {
+    if (DynamicDumpSharedSpaces) {
+      assert(!HeapShared::is_heap_region(region), "dynamic archive doesn't support heap regions");
+      target_base = DynamicArchive::buffer_to_target(base);
+    } else {
+      target_base = base;
+    }
   }
 
   si->set_file_offset(_file_offset);
-  log_info(cds)("Shared file region %d: " SIZE_FORMAT_HEX_W(08)
+  char* requested_base = (target_base == NULL) ? NULL : target_base + MetaspaceShared::final_delta();
+  log_info(cds)("Shared file region  %d: " SIZE_FORMAT_HEX_W(08)
                 " bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(08),
-                region, size, p2i(target_base), _file_offset);
+                region, size, p2i(requested_base), _file_offset);
 
   int crc = ClassLoader::crc32(0, base, (jint)size);
-  si->init(HeapShared::is_heap_region(region), target_base, size, read_only, allow_exec, crc);
+  si->init(region, target_base, size, read_only, allow_exec, crc);
 
   if (base != NULL) {
     write_bytes_aligned(base, size);
   }
 }
 
+
+void FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap) {
+  ResourceMark rm;
+  size_t size_in_bits = ptrmap->size();
+  size_t size_in_bytes = ptrmap->size_in_bytes();
+  uintptr_t* buffer = (uintptr_t*)NEW_RESOURCE_ARRAY(char, size_in_bytes);
+  ptrmap->write_to(buffer, size_in_bytes);
+  header()->set_ptrmap_size_in_bits(size_in_bits);
+
+  log_info(cds)("ptrmap = " INTPTR_FORMAT " (" SIZE_FORMAT " bytes)",
+                p2i(buffer), size_in_bytes);
+  write_region(MetaspaceShared::bm, (char*)buffer, size_in_bytes, /*read_only=*/true, /*allow_exec=*/false);
+}
+
 // Write out the given archive heap memory regions.  GC code combines multiple
 // consecutive archive GC regions into one MemRegion whenever possible and
 // produces the 'heap_mem' array.
@@ -1229,11 +1252,13 @@
       total_size += size;
     }
 
-    log_info(cds)("Archive heap region %d " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
+    log_info(cds)("Archive heap region %d: " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
                   i, p2i(start), p2i(start + size), size);
     write_region(i, start, size, false, false);
     if (size > 0) {
-      space_at(i)->init_oopmap(oopmaps->at(arr_idx)._oopmap,
+      address oopmap = oopmaps->at(arr_idx)._oopmap;
+      assert(oopmap >= (address)SharedBaseAddress, "must be");
+      space_at(i)->init_oopmap(oopmap - (address)SharedBaseAddress,
                                oopmaps->at(arr_idx)._oopmap_size_in_bits);
     }
   }
@@ -1285,6 +1310,9 @@
   align_file_position();
 }
 
+void FileMapInfo::set_final_requested_base(char* b) {
+  header()->set_final_requested_base(b);
+}
 
 // Close the shared archive file.  This does NOT unmap mapped regions.
 
@@ -1331,94 +1359,197 @@
   return true;
 }
 
-// Map the whole region at once, assumed to be allocated contiguously.
-ReservedSpace FileMapInfo::reserve_shared_memory() {
-  char* requested_addr = region_addr(0);
-  size_t size = FileMapInfo::core_spaces_size();
-
-  // Reserve the space first, then map otherwise map will go right over some
-  // other reserved memory (like the code cache).
-  ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
-  if (!rs.is_reserved()) {
-    fail_continue("Unable to reserve shared space at required address "
-                  INTPTR_FORMAT, p2i(requested_addr));
-    return rs;
-  }
-  // the reserved virtual memory is for mapping class data sharing archive
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
-
-  return rs;
-}
-
 // Memory map a region in the address space.
-static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode",
+static const char* shared_region_name[] = { "MiscData", "ReadWrite", "ReadOnly", "MiscCode", "Bitmap",
                                             "String1", "String2", "OpenArchive1", "OpenArchive2" };
 
-char* FileMapInfo::map_regions(int regions[], char* saved_base[], size_t len) {
-  char* prev_top = NULL;
-  char* curr_base;
-  char* curr_top;
-  int i = 0;
-  for (i = 0; i < (int)len; i++) {
-    curr_base = map_region(regions[i], &curr_top);
-    if (curr_base == NULL) {
-      return NULL;
+MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs) {
+  DEBUG_ONLY(FileMapRegion* last_region = NULL);
+  intx addr_delta = mapped_base_address - header()->requested_base_address();
+
+  // Make sure we don't attempt to use header()->mapped_base_address() unless
+  // it's been successfully mapped.
+  DEBUG_ONLY(header()->set_mapped_base_address((char*)(uintptr_t)0xdeadbeef);)
+
+  for (int r = 0; r < num_regions; r++) {
+    int idx = regions[r];
+    MapArchiveResult result = map_region(idx, addr_delta, mapped_base_address, rs);
+    if (result != MAP_ARCHIVE_SUCCESS) {
+      return result;
     }
-    if (i > 0) {
-      // We require that mc->rw->ro->md to be laid out consecutively, with no
-      // gaps between them. That way, we can ensure that the OS won't be able to
-      // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
-      // would mess up the simple comparision in MetaspaceShared::is_in_shared_metaspace().
-      assert(curr_base == prev_top, "must be");
-    }
-    log_info(cds)("Mapped region #%d at base %p top %p", regions[i], curr_base, curr_top);
-    saved_base[i] = curr_base;
-    prev_top = curr_top;
+    FileMapRegion* si = space_at(idx);
+    DEBUG_ONLY(if (last_region != NULL) {
+        // Ensure that the OS won't be able to allocate new memory spaces between any mapped
+        // regions, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
+        assert(si->mapped_base() == last_region->mapped_end(), "must have no gaps");
+      }
+      last_region = si;)
+    log_info(cds)("Mapped %s region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " (%s)", is_static() ? "static " : "dynamic",
+                  idx, p2i(si->mapped_base()), p2i(si->mapped_end()),
+                  shared_region_name[idx]);
+
   }
-  return curr_top;
+
+  DEBUG_ONLY(if (addr_delta == 0 && ArchiveRelocationMode == 1) {
+      // This is for simulating mmap failures at the requested address. We do it here (instead
+      // of MetaspaceShared::map_archives) so we can thoroughly test the code for failure handling
+      // (releasing all allocated resources, etc).
+      log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
+      return MAP_ARCHIVE_MMAP_FAILURE;
+    });
+
+  header()->set_mapped_base_address(header()->requested_base_address() + addr_delta);
+  if (addr_delta != 0 && !relocate_pointers(addr_delta)) {
+    return MAP_ARCHIVE_OTHER_FAILURE;
+  }
+
+  return MAP_ARCHIVE_SUCCESS;
 }
 
-char* FileMapInfo::map_region(int i, char** top_ret) {
+bool FileMapInfo::read_region(int i, char* base, size_t size) {
+  assert(MetaspaceShared::use_windows_memory_mapping(), "used by windows only");
+  FileMapRegion* si = space_at(i);
+  log_info(cds)("Commit %s region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " (%s)%s",
+                is_static() ? "static " : "dynamic", i, p2i(base), p2i(base + size),
+                shared_region_name[i], si->allow_exec() ? " exec" : "");
+  if (!os::commit_memory(base, size, si->allow_exec())) {
+    log_error(cds)("Failed to commit %s region #%d (%s)", is_static() ? "static " : "dynamic",
+                   i, shared_region_name[i]);
+    return false;
+  }
+  if (lseek(_fd, (long)si->file_offset(), SEEK_SET) != (int)si->file_offset() ||
+      read_bytes(base, size) != size) {
+    return false;
+  }
+  return true;
+}
+
+MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs) {
   assert(!HeapShared::is_heap_region(i), "sanity");
   FileMapRegion* si = space_at(i);
-  size_t used = si->used();
-  size_t alignment = os::vm_allocation_granularity();
-  size_t size = align_up(used, alignment);
-  char *requested_addr = region_addr(i);
+  size_t size = si->used_aligned();
+  char *requested_addr = mapped_base_address + si->mapping_offset();
+  assert(si->mapped_base() == NULL, "must be not mapped yet");
+  assert(requested_addr != NULL, "must be specified");
+
+  si->set_mapped_from_file(false);
 
-#ifdef _WINDOWS
-  // Windows cannot remap read-only shared memory to read-write when required for
-  // RedefineClasses, which is also used by JFR.  Always map windows regions as RW.
-  si->set_read_only(false);
-#else
-  // If a tool agent is in use (debugging enabled), or JFR, we must map the address space RW
-  if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space() ||
-      Arguments::has_jfr_option()) {
+  if (MetaspaceShared::use_windows_memory_mapping()) {
+    // Windows cannot remap read-only shared memory to read-write when required for
+    // RedefineClasses, which is also used by JFR.  Always map windows regions as RW.
+    si->set_read_only(false);
+  } else if (JvmtiExport::can_modify_any_class() || JvmtiExport::can_walk_any_space() ||
+             Arguments::has_jfr_option()) {
+    // If a tool agent is in use (debugging enabled), or JFR, we must map the address space RW
     si->set_read_only(false);
+  } else if (addr_delta != 0) {
+    si->set_read_only(false); // Need to patch the pointers
+  }
+
+  if (rs.is_reserved()) {
+    assert(rs.contains(requested_addr) && rs.contains(requested_addr + size - 1), "must be");
+    MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared);
   }
-#endif // _WINDOWS
 
-  // map the contents of the CDS archive in this memory
-  char *base = os::map_memory(_fd, _full_path, si->file_offset(),
-                              requested_addr, size, si->read_only(),
-                              si->allow_exec());
-  if (base == NULL || base != requested_addr) {
-    fail_continue("Unable to map %s shared space at required address.", shared_region_name[i]);
-    _memory_mapping_failed = true;
-    return NULL;
+  if (MetaspaceShared::use_windows_memory_mapping() && addr_delta != 0) {
+    // This is the second time we try to map the archive(s). We have already created a ReservedSpace
+    // that covers all the FileMapRegions to ensure all regions can be mapped. However, Windows
+    // can't mmap into a ReservedSpace, so we just os::read() the data. We're going to patch
+    // all the regions anyway, so mmap would bring no benefit here.
+    if (!read_region(i, requested_addr, size)) {
+      return MAP_ARCHIVE_OTHER_FAILURE; // oom or I/O error.
+    }
+  } else {
+    char* base = os::map_memory(_fd, _full_path, si->file_offset(),
+                                requested_addr, size, si->read_only(),
+                                si->allow_exec());
+    if (base != requested_addr) {
+      log_info(cds)("Unable to map %s shared space at required address.", shared_region_name[i]);
+      _memory_mapping_failed = true;
+      return MAP_ARCHIVE_MMAP_FAILURE;
+    }
+    si->set_mapped_from_file(true);
   }
-#ifdef _WINDOWS
-  // This call is Windows-only because the memory_type gets recorded for the other platforms
-  // in method FileMapInfo::reserve_shared_memory(), which is not called on Windows.
-  MemTracker::record_virtual_memory_type((address)base, mtClassShared);
-#endif
+  si->set_mapped_base(requested_addr);
+
+  if (!rs.is_reserved()) {
+    // When mapping on Windows with (addr_delta == 0), we don't reserve the address space for the regions
+    // (Windows can't mmap into a ReservedSpace). In this case, NMT requires that
+    // record_virtual_memory_type() be called only after os::map_memory() has succeeded.
+    assert(MetaspaceShared::use_windows_memory_mapping(), "Windows memory mapping only");
+    MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared);
+  }
 
   if (VerifySharedSpaces && !verify_region_checksum(i)) {
+    return MAP_ARCHIVE_OTHER_FAILURE;
+  }
+
+  return MAP_ARCHIVE_SUCCESS;
+}
+
+char* FileMapInfo::map_relocation_bitmap(size_t& bitmap_size) {
+  FileMapRegion* si = space_at(MetaspaceShared::bm);
+  bitmap_size = si->used_aligned();
+  bool read_only = true, allow_exec = false;
+  char* requested_addr = NULL; // allow OS to pick any location
+  char* bitmap_base = os::map_memory(_fd, _full_path, si->file_offset(),
+                                     requested_addr, bitmap_size, read_only, allow_exec);
+  if (bitmap_base == NULL) {
+    log_error(cds)("failed to map relocation bitmap");
     return NULL;
   }
 
-  *top_ret = base + size;
-  return base;
+  if (VerifySharedSpaces && !region_crc_check(bitmap_base, bitmap_size, si->crc())) {
+    log_error(cds)("relocation bitmap CRC error");
+    if (!os::unmap_memory(bitmap_base, bitmap_size)) {
+      fatal("os::unmap_memory of relocation bitmap failed");
+    }
+    return NULL;
+  }
+
+  return bitmap_base;
+}
+
+bool FileMapInfo::relocate_pointers(intx addr_delta) {
+  log_debug(cds, reloc)("runtime archive relocation start");
+  size_t bitmap_size;
+  char* bitmap_base = map_relocation_bitmap(bitmap_size);
+
+  if (bitmap_base == NULL) {
+    return false;
+  } else {
+    size_t ptrmap_size_in_bits = header()->ptrmap_size_in_bits();
+    log_debug(cds, reloc)("mapped relocation bitmap @ " INTPTR_FORMAT " (" SIZE_FORMAT
+                          " bytes = " SIZE_FORMAT " bits)",
+                          p2i(bitmap_base), bitmap_size, ptrmap_size_in_bits);
+
+    BitMapView ptrmap((BitMap::bm_word_t*)bitmap_base, ptrmap_size_in_bits);
+
+    // Patch all pointers in the mapped region that are marked by ptrmap.
+    address patch_base = (address)mapped_base();
+    address patch_end  = (address)mapped_end();
+
+    // the current value of each pointer to be patched must be within this
+    // range (i.e., between the requested base address and the requested end of the archive).
+    // Note: top archive may point to objects in the base archive, but not the other way around.
+    address valid_old_base = (address)header()->requested_base_address();
+    address valid_old_end  = valid_old_base + mapping_end_offset();
+
+    // after patching, the pointers must point inside this range
+    // (the requested location of the archive, as mapped at runtime).
+    address valid_new_base = (address)header()->mapped_base_address();
+    address valid_new_end  = (address)mapped_end();
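+
+    // For example (illustrative): if the archive was requested at 0x800000000 but
+    // actually mapped at 0x811000000, addr_delta is 0x11000000 and each marked slot
+    // is rewritten from its requested address to the corresponding mapped address.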
+
+    SharedDataRelocator<false> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+                                       valid_new_base, valid_new_end, addr_delta);
+    ptrmap.iterate(&patcher);
+
+    if (!os::unmap_memory(bitmap_base, bitmap_size)) {
+      fatal("os::unmap_memory of relocation bitmap failed");
+    }
+    log_debug(cds, reloc)("runtime archive relocation done");
+    return true;
+  }
 }
 
 size_t FileMapInfo::read_bytes(void* buffer, size_t count) {
@@ -1434,10 +1565,13 @@
 }
 
 address FileMapInfo::decode_start_address(FileMapRegion* spc, bool with_current_oop_encoding_mode) {
+  size_t offset = spc->mapping_offset();
+  assert((offset >> 32) == 0, "must be 32-bit only");
+  uint n = (uint)offset;
   if (with_current_oop_encoding_mode) {
-    return (address)CompressedOops::decode_not_null(spc->offset());
+    return (address)CompressedOops::decode_not_null(n);
   } else {
-    return (address)HeapShared::decode_from_archive(spc->offset());
+    return (address)HeapShared::decode_from_archive(n);
   }
 }
 
@@ -1705,7 +1839,7 @@
                                                         int first_region_idx) {
   for (int i=0; i<num_ranges; i++) {
     FileMapRegion* si = space_at(i + first_region_idx);
-    HeapShared::patch_archived_heap_embedded_pointers(ranges[i], (address)si->oopmap(),
+    HeapShared::patch_archived_heap_embedded_pointers(ranges[i], (address)(SharedBaseAddress + si->oopmap_offset()),
                                                       si->oopmap_size_in_bits());
   }
 }
@@ -1759,11 +1893,10 @@
   }
 }
 
-void FileMapInfo::unmap_regions(int regions[], char* saved_base[], size_t len) {
-  for (int i = 0; i < (int)len; i++) {
-    if (saved_base[i] != NULL) {
-      unmap_region(regions[i]);
-    }
+void FileMapInfo::unmap_regions(int regions[], int num_regions) {
+  for (int r = 0; r < num_regions; r++) {
+    int idx = regions[r];
+    unmap_region(idx);
   }
 }
 
@@ -1772,16 +1905,17 @@
 void FileMapInfo::unmap_region(int i) {
   assert(!HeapShared::is_heap_region(i), "sanity");
   FileMapRegion* si = space_at(i);
+  char* mapped_base = si->mapped_base();
   size_t used = si->used();
   size_t size = align_up(used, os::vm_allocation_granularity());
 
-  if (used == 0) {
-    return;
-  }
-
-  char* addr = region_addr(i);
-  if (!os::unmap_memory(addr, size)) {
-    fail_stop("Unable to unmap shared space.");
+  if (mapped_base != NULL && size > 0 && si->mapped_from_file()) {
+    log_info(cds)("Unmapping region #%d at base " INTPTR_FORMAT " (%s)", i, p2i(mapped_base),
+                  shared_region_name[i]);
+    if (!os::unmap_memory(mapped_base, size)) {
+      fatal("os::unmap_memory failed");
+    }
+    si->set_mapped_base(NULL);
   }
 }
 
@@ -1813,7 +1947,7 @@
 // [1] validate_header() - done here.
 // [2] validate_shared_path_table - this is done later, because the table is in the RW
 //     region of the archive, which is not mapped yet.
-bool FileMapInfo::initialize(bool is_static) {
+bool FileMapInfo::initialize() {
   assert(UseSharedSpaces, "UseSharedSpaces expected.");
 
   if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) {
@@ -1828,11 +1962,10 @@
   if (!open_for_read()) {
     return false;
   }
-
-  init_from_file(_fd, is_static);
-  // UseSharedSpaces could be disabled if the checking of some of the header fields in
-  // init_from_file has failed.
-  if (!UseSharedSpaces || !validate_header(is_static)) {
+  if (!init_from_file(_fd)) {
+    return false;
+  }
+  if (!validate_header()) {
     return false;
   }
   return true;
@@ -1845,10 +1978,18 @@
     return si->used() > 0 ?
           (char*)start_address_as_decoded_with_current_oop_encoding_mode(si) : NULL;
   } else {
-    return si->base();
+    return si->mapped_base();
   }
 }
 
+FileMapRegion* FileMapInfo::first_core_space() const {
+  return is_static() ? space_at(MetaspaceShared::mc) : space_at(MetaspaceShared::rw);
+}
+
+FileMapRegion* FileMapInfo::last_core_space() const {
+  return is_static() ? space_at(MetaspaceShared::md) : space_at(MetaspaceShared::mc);
+}
+
 int FileMapHeader::compute_crc() {
   char* start = (char*)this;
   // start computing from the field after _crc
@@ -1860,7 +2001,6 @@
 
 // This function should only be called during run time with UseSharedSpaces enabled.
 bool FileMapHeader::validate() {
-
   if (_obj_alignment != ObjectAlignmentInBytes) {
     FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
                   " does not equal the current ObjectAlignmentInBytes of " INTX_FORMAT ".",
@@ -1913,7 +2053,7 @@
   return true;
 }
 
-bool FileMapInfo::validate_header(bool is_static) {
+bool FileMapInfo::validate_header() {
   return header()->validate();
 }
 
@@ -1932,18 +2072,14 @@
 
 // Unmap mapped regions of shared space.
 void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
-  MetaspaceShared::set_shared_metaspace_range(NULL, NULL);
+  MetaspaceShared::set_shared_metaspace_range(NULL, NULL, NULL);
 
   FileMapInfo *map_info = FileMapInfo::current_info();
   if (map_info) {
     map_info->fail_continue("%s", msg);
     for (int i = 0; i < MetaspaceShared::num_non_heap_spaces; i++) {
       if (!HeapShared::is_heap_region(i)) {
-        char *addr = map_info->region_addr(i);
-        if (addr != NULL) {
-          map_info->unmap_region(i);
-          map_info->space_at(i)->mark_invalid();
-        }
+        map_info->unmap_region(i);
       }
     }
     // Dealloc the archive heap regions only without unmapping. The regions are part
--- a/src/hotspot/share/memory/filemap.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/filemap.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -43,6 +43,8 @@
 
 static const int JVM_IDENT_MAX = 256;
 
+class CHeapBitMap;
+
 class SharedClassPathEntry {
   enum {
     modules_image_entry,
@@ -104,6 +106,9 @@
   Array<u8>* _table;
   int _size;
 public:
+  SharedPathTable() : _table(NULL), _size(0) {}
+  SharedPathTable(Array<u8>* table, int size) : _table(table), _size(size) {}
+
   void dumptime_init(ClassLoaderData* loader_data, Thread* THREAD);
   void metaspace_pointers_do(MetaspaceClosure* it);
 
@@ -138,25 +143,29 @@
   }
 
   // Accessors
-  int crc()                      const { return _crc; }
-  size_t file_offset()           const { return _file_offset; }
-  char*  base()                  const { assert_is_not_heap_region(); return _addr._base;  }
-  narrowOop offset()             const { assert_is_heap_region();     return (narrowOop)(_addr._offset); }
-  size_t used()                  const { return _used; }
-  bool read_only()               const { return _read_only != 0; }
-  bool allow_exec()              const { return _allow_exec != 0; }
-  void* oopmap()                 const { return _oopmap; }
-  size_t oopmap_size_in_bits()   const { return _oopmap_size_in_bits; }
+  int crc()                         const { return _crc; }
+  size_t file_offset()              const { return _file_offset; }
+  size_t mapping_offset()           const { return _mapping_offset; }
+  size_t mapping_end_offset()       const { return _mapping_offset + used_aligned(); }
+  size_t used()                     const { return _used; }
+  size_t used_aligned()             const; // aligned up to os::vm_allocation_granularity()
+  char*  mapped_base()              const { assert_is_not_heap_region(); return _mapped_base; }
+  char*  mapped_end()               const { return mapped_base()        + used_aligned(); }
+  bool   read_only()                const { return _read_only != 0; }
+  bool   allow_exec()               const { return _allow_exec != 0; }
+  bool   mapped_from_file()         const { return _mapped_from_file != 0; }
+  size_t oopmap_offset()            const { assert_is_heap_region();     return _oopmap_offset; }
+  size_t oopmap_size_in_bits()      const { assert_is_heap_region();     return _oopmap_size_in_bits; }
 
-  void set_file_offset(size_t s) { _file_offset = s; }
-  void set_read_only(bool v)     { _read_only = v; }
-  void mark_invalid()            { _addr._base = NULL; }
-
-  void init(bool is_heap_region, char* base, size_t size, bool read_only,
+  void set_file_offset(size_t s)     { _file_offset = s; }
+  void set_read_only(bool v)         { _read_only = v; }
+  void set_mapped_base(char* p)      { _mapped_base = p; }
+  void set_mapped_from_file(bool v)  { _mapped_from_file = v; }
+  void init(int region_index, char* base, size_t size, bool read_only,
             bool allow_exec, int crc);
 
-  void init_oopmap(void* map, size_t size_in_bits) {
-    _oopmap = map;
+  void init_oopmap(size_t oopmap_offset, size_t size_in_bits) {
+    _oopmap_offset = oopmap_offset;
     _oopmap_size_in_bits = size_in_bits;
   }
 };
@@ -178,13 +187,10 @@
   uintx  _max_heap_size;            // java max heap size during dumping
   CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode
   int     _narrow_klass_shift;      // save narrow klass base and shift
-  address _narrow_klass_base;
-  char*   _misc_data_patching_start;
-  char*   _serialized_data_start;  // Data accessed using {ReadClosure,WriteClosure}::serialize()
-  address _i2i_entry_code_buffers;
+  size_t  _misc_data_patching_offset;
+  size_t  _serialized_data_offset;  // Data accessed using {ReadClosure,WriteClosure}::serialize()
+  size_t  _i2i_entry_code_buffers_offset;
   size_t  _i2i_entry_code_buffers_size;
-  size_t  _core_spaces_size;        // number of bytes allocated by the core spaces
-                                    // (mc, md, ro, rw and od).
   address _heap_end;                // heap end at dump time.
   bool _base_archive_is_default;    // indicates if the base archive is the system default one
 
@@ -202,7 +208,8 @@
   //      check_nonempty_dir_in_shared_path_table()
   //      validate_shared_path_table()
   //      validate_non_existent_class_paths()
-  SharedPathTable _shared_path_table;
+  size_t _shared_path_table_offset;
+  int    _shared_path_table_size;
 
   jshort _app_class_paths_start_index;  // Index of first app classpath entry
   jshort _app_module_paths_start_index; // Index of first module path entry
@@ -211,9 +218,19 @@
   bool   _verify_local;                 // BytecodeVerificationLocal setting
   bool   _verify_remote;                // BytecodeVerificationRemote setting
   bool   _has_platform_or_app_classes;  // Archive contains app classes
-  size_t _shared_base_address;          // SharedBaseAddress used at dump time
+  char*  _requested_base_address;       // Archive relocation is not necessary if we map with this base address.
+  char*  _mapped_base_address;          // Actual base address where archive is mapped.
+
   bool   _allow_archiving_with_java_agent; // setting of the AllowArchivingWithJavaAgent option
+  size_t _ptrmap_size_in_bits;          // Size of pointer relocation bitmap
 
+  char* from_mapped_offset(size_t offset) const {
+    return mapped_base_address() + offset;
+  }
+  void set_mapped_offset(char* p, size_t *offset) {
+    assert(p >= mapped_base_address(), "sanity");
+    *offset = p - mapped_base_address();
+  }
 public:
   // Accessors -- fields declared in CDSFileMapHeaderBase
   unsigned int magic() const {return _magic;}
@@ -234,19 +251,19 @@
   uintx max_heap_size()                    const { return _max_heap_size; }
   CompressedOops::Mode narrow_oop_mode()   const { return _narrow_oop_mode; }
   int narrow_klass_shift()                 const { return _narrow_klass_shift; }
-  address narrow_klass_base()              const { return _narrow_klass_base; }
-  char* misc_data_patching_start()         const { return _misc_data_patching_start; }
-  char* serialized_data_start()            const { return _serialized_data_start; }
-  address i2i_entry_code_buffers()         const { return _i2i_entry_code_buffers; }
+  address narrow_klass_base()              const { return (address)mapped_base_address(); }
+  char* misc_data_patching_start()         const { return from_mapped_offset(_misc_data_patching_offset); }
+  char* serialized_data_start()            const { return from_mapped_offset(_serialized_data_offset); }
+  address i2i_entry_code_buffers()         const { return (address)from_mapped_offset(_i2i_entry_code_buffers_offset); }
   size_t i2i_entry_code_buffers_size()     const { return _i2i_entry_code_buffers_size; }
-  size_t core_spaces_size()                const { return _core_spaces_size; }
   address heap_end()                       const { return _heap_end; }
   bool base_archive_is_default()           const { return _base_archive_is_default; }
   const char* jvm_ident()                  const { return _jvm_ident; }
   size_t base_archive_name_size()          const { return _base_archive_name_size; }
-  size_t shared_base_address()             const { return _shared_base_address; }
+  char* requested_base_address()           const { return _requested_base_address; }
+  char* mapped_base_address()              const { return _mapped_base_address; }
   bool has_platform_or_app_classes()       const { return _has_platform_or_app_classes; }
-  SharedPathTable shared_path_table()      const { return _shared_path_table; }
+  size_t ptrmap_size_in_bits()             const { return _ptrmap_size_in_bits; }
 
   // FIXME: These should really return int
   jshort max_used_path_index()             const { return _max_used_path_index; }
@@ -254,27 +271,32 @@
   jshort app_class_paths_start_index()     const { return _app_class_paths_start_index; }
   jshort num_module_paths()                const { return _num_module_paths; }
 
-  void set_core_spaces_size(size_t s)            { _core_spaces_size = s; }
   void set_has_platform_or_app_classes(bool v)   { _has_platform_or_app_classes = v; }
-  void set_misc_data_patching_start(char* p)     { _misc_data_patching_start = p; }
-  void set_serialized_data_start(char* p)        { _serialized_data_start   = p; }
+  void set_misc_data_patching_start(char* p)     { set_mapped_offset(p, &_misc_data_patching_offset); }
+  void set_serialized_data_start(char* p)        { set_mapped_offset(p, &_serialized_data_offset); }
   void set_base_archive_name_size(size_t s)      { _base_archive_name_size = s; }
   void set_base_archive_is_default(bool b)       { _base_archive_is_default = b; }
   void set_header_size(size_t s)                 { _header_size = s; }
-
+  void set_ptrmap_size_in_bits(size_t s)         { _ptrmap_size_in_bits = s; }
+  void set_mapped_base_address(char* p)          { _mapped_base_address = p; }
   void set_i2i_entry_code_buffers(address p, size_t s) {
-    _i2i_entry_code_buffers = p;
+    set_mapped_offset((char*)p, &_i2i_entry_code_buffers_offset);
     _i2i_entry_code_buffers_size = s;
   }
 
-  void relocate_shared_path_table(Array<u8>* t) {
-    assert(DynamicDumpSharedSpaces, "only");
-    _shared_path_table.set_table(t);
+  void set_shared_path_table(SharedPathTable table) {
+    set_mapped_offset((char*)table.table(), &_shared_path_table_offset);
+    _shared_path_table_size = table.size();
   }
 
-  void shared_path_table_metaspace_pointers_do(MetaspaceClosure* it) {
-    assert(DynamicDumpSharedSpaces, "only");
-    _shared_path_table.metaspace_pointers_do(it);
+  void set_final_requested_base(char* b) {
+    _requested_base_address = b;
+    _mapped_base_address = 0;
+  }
+
+  SharedPathTable shared_path_table() const {
+    return SharedPathTable((Array<u8>*)from_mapped_offset(_shared_path_table_offset),
+                           _shared_path_table_size);
   }
 
   bool validate();
@@ -301,6 +323,7 @@
 
   bool           _is_static;
   bool           _file_open;
+  bool           _is_mapped;
   int            _fd;
   size_t         _file_offset;
   const char*    _full_path;
@@ -327,8 +350,11 @@
   static bool get_base_archive_name_from_header(const char* archive_name,
                                                 int* size, char** base_archive_name);
   static bool check_archive(const char* archive_name, bool is_static);
+  static SharedPathTable shared_path_table() {
+    return _shared_path_table;
+  }
   void restore_shared_path_table();
-  bool init_from_file(int fd, bool is_static);
+  bool init_from_file(int fd);
   static void metaspace_pointers_do(MetaspaceClosure* it);
 
   void log_paths(const char* msg, int start_idx, int end_idx);
@@ -341,7 +367,7 @@
   void   set_header_crc(int crc)     { header()->set_crc(crc); }
   int    space_crc(int i)      const { return space_at(i)->crc(); }
   void   populate_header(size_t alignment);
-  bool   validate_header(bool is_static);
+  bool   validate_header();
   void   invalidate();
   int    crc()                 const { return header()->crc(); }
   int    version()             const { return header()->version(); }
@@ -370,11 +396,17 @@
     header()->set_i2i_entry_code_buffers(addr, s);
   }
 
-  void set_core_spaces_size(size_t s)         const { header()->set_core_spaces_size(s); }
-  size_t core_spaces_size()                   const { return header()->core_spaces_size(); }
+  bool is_static()                            const { return _is_static; }
+  bool is_mapped()                            const { return _is_mapped; }
+  void set_is_mapped(bool v)                        { _is_mapped = v; }
+  const char* full_path()                     const { return _full_path; }
+  void set_final_requested_base(char* b);
+
+  char* requested_base_address()           const { return header()->requested_base_address(); }
 
   class DynamicArchiveHeader* dynamic_header() const {
-    assert(!_is_static, "must be");
+    assert(!is_static(), "must be");
     return (DynamicArchiveHeader*)header();
   }
 
@@ -402,21 +434,21 @@
   static void assert_mark(bool check);
 
   // File manipulation.
-  bool  initialize(bool is_static) NOT_CDS_RETURN_(false);
-  bool  open_for_read(const char* path = NULL);
+  bool  initialize() NOT_CDS_RETURN_(false);
+  bool  open_for_read();
   void  open_for_write(const char* path = NULL);
   void  write_header();
   void  write_region(int region, char* base, size_t size,
                      bool read_only, bool allow_exec);
+  void  write_bitmap_region(const CHeapBitMap* ptrmap);
   size_t write_archive_heap_regions(GrowableArray<MemRegion> *heap_mem,
                                     GrowableArray<ArchiveHeapOopmapInfo> *oopmaps,
                                     int first_region_id, int max_num_regions);
   void  write_bytes(const void* buffer, size_t count);
   void  write_bytes_aligned(const void* buffer, size_t count);
   size_t  read_bytes(void* buffer, size_t count);
-  char* map_regions(int regions[], char* saved_base[], size_t len);
-  char* map_region(int i, char** top_ret);
-  void  map_heap_regions_impl() NOT_CDS_JAVA_HEAP_RETURN;
+  MapArchiveResult map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs);
+  void  unmap_regions(int regions[], int num_regions);
   void  map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
   void  fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
   void  patch_archived_heap_embedded_pointers() NOT_CDS_JAVA_HEAP_RETURN;
@@ -424,7 +456,6 @@
                                               int first_region_idx) NOT_CDS_JAVA_HEAP_RETURN;
   bool  has_heap_regions()  NOT_CDS_JAVA_HEAP_RETURN_(false);
   MemRegion get_heap_regions_range_with_current_oop_encoding_mode() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion());
-  void  unmap_regions(int regions[], char* saved_base[], size_t len);
   void  unmap_region(int i);
   bool  verify_region_checksum(int i);
   void  close();
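
Editorial note: the new map_regions() signature above returns an explicit result code instead of a char* top pointer. A hedged sketch of the caller-side policy used later in this changeset (the enumerators match those referenced in metaspaceShared.cpp below): a broken top (dynamic) archive is dropped while the base archive is kept, but an mmap failure forces a retry at an OS-chosen address.

    enum MapArchiveResult { MAP_ARCHIVE_SUCCESS, MAP_ARCHIVE_MMAP_FAILURE, MAP_ARCHIVE_OTHER_FAILURE };

    MapArchiveResult combine_results(MapArchiveResult static_r, MapArchiveResult dynamic_r) {
      if (static_r != MAP_ARCHIVE_SUCCESS) {
        return static_r;                    // base archive failed: propagate
      }
      if (dynamic_r == MAP_ARCHIVE_OTHER_FAILURE) {
        return MAP_ARCHIVE_SUCCESS;         // bad top archive: keep the base only
      }
      return dynamic_r;                     // SUCCESS, or MMAP_FAILURE (retry elsewhere)
    }
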
@@ -452,6 +483,9 @@
   static void check_nonempty_dir_in_shared_path_table();
   bool validate_shared_path_table();
   void validate_non_existent_class_paths();
+  static void set_shared_path_table(FileMapInfo* info) {
+    _shared_path_table = info->header()->shared_path_table();
+  }
   static void update_jar_manifest(ClassPathEntry *cpe, SharedClassPathEntry* ent, TRAPS);
   static int num_non_existent_class_paths();
   static void record_non_existent_class_path_entry(const char* path);
@@ -475,12 +509,28 @@
 
   char* region_addr(int idx);
 
+  // The offset of the first core region in the archive, relative to SharedBaseAddress
+  size_t mapping_base_offset() const { return first_core_space()->mapping_offset(); }
+  // The offset of the (exclusive) end of the last core region in this archive, relative to SharedBaseAddress
+  size_t mapping_end_offset()  const { return last_core_space()->mapping_end_offset(); }
+
+  char* mapped_base()    const { return first_core_space()->mapped_base(); }
+  char* mapped_end()     const { return last_core_space()->mapped_end();   }
+
+  // Non-zero if the archive needs to be mapped to a non-default location due to ASLR.
+  intx relocation_delta() const {
+    return header()->mapped_base_address() - header()->requested_base_address();
+  }
+
+  FileMapRegion* first_core_space() const;
+  FileMapRegion* last_core_space() const;
+
  private:
   void  seek_to_position(size_t pos);
   char* skip_first_path_entry(const char* path) NOT_CDS_RETURN_(NULL);
   int   num_paths(const char* path) NOT_CDS_RETURN_(0);
   GrowableArray<const char*>* create_path_array(const char* path) NOT_CDS_RETURN_(NULL);
-  bool  fail(const char* msg, const char* name) NOT_CDS_RETURN_(false);
+  bool  classpath_failure(const char* msg, const char* name) NOT_CDS_RETURN_(false);
   bool  check_paths(int shared_path_start_idx, int num_paths,
                     GrowableArray<const char*>* rp_array) NOT_CDS_RETURN_(false);
   bool  validate_boot_class_paths() NOT_CDS_RETURN_(false);
@@ -489,6 +539,11 @@
                       bool is_open = false) NOT_CDS_JAVA_HEAP_RETURN_(false);
   bool  region_crc_check(char* buf, size_t size, int expected_crc) NOT_CDS_RETURN_(false);
   void  dealloc_archive_heap_regions(MemRegion* regions, int num, bool is_open) NOT_CDS_JAVA_HEAP_RETURN;
+  void  map_heap_regions_impl() NOT_CDS_JAVA_HEAP_RETURN;
+  char* map_relocation_bitmap(size_t& bitmap_size);
+  MapArchiveResult map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs);
+  bool  read_region(int i, char* base, size_t size);
+  bool  relocate_pointers(intx addr_delta);
 
   FileMapRegion* space_at(int i) const {
     return header()->space_at(i);
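
Editorial note: relocation_delta() above is the whole relocation story in one subtraction. A minimal runnable sketch of how such a delta is consumed (addresses are made-up, names illustrative): any dump-time pointer can be rebased by adding the difference between where the archive actually mapped and where it asked to map.

    #include <cstdint>
    #include <cstdio>

    int main() {
      char* requested_base = (char*)0x800000000ULL;   // assumed dump-time base
      char* mapped_base    = (char*)0x900000000ULL;   // base chosen under ASLR
      intptr_t delta = mapped_base - requested_base;

      char* dump_time_ptr = requested_base + 0x1234;  // pointer as written in the archive
      char* runtime_ptr   = dump_time_ptr + delta;    // pointer after relocation

      printf("delta = %ld, runtime ptr = %p\n", (long)delta, (void*)runtime_ptr);
      return 0;
    }
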
--- a/src/hotspot/share/memory/heapShared.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/heapShared.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -26,19 +26,22 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/stringTable.hpp"
 #include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "logging/log.hpp"
 #include "logging/logMessage.hpp"
 #include "logging/logStream.hpp"
+#include "memory/archiveUtils.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.inline.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceClosure.hpp"
+#include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/compressedOops.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/safepointVerifiers.hpp"
@@ -383,8 +386,13 @@
           _k->external_name(), i, subgraph_k->external_name());
       }
       _subgraph_object_klasses->at_put(i, subgraph_k);
+      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
     }
   }
+
+  ArchivePtrMarker::mark_pointer(&_k);
+  ArchivePtrMarker::mark_pointer(&_entry_field_records);
+  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
 }
 
 struct CopyKlassSubGraphInfoToArchive : StackObj {
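
Editorial note: the ArchivePtrMarker::mark_pointer() calls added above record which words of the archive hold pointers. A minimal sketch of that bookkeeping with simplified stand-in types (the real implementation uses CHeapBitMap): one bit per word of the dump region, set when that word is known to contain a pointer, so relocation can later patch exactly those slots.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    class PtrMarker {
      std::vector<bool> _bits;   // stands in for CHeapBitMap
      uintptr_t* _base;          // start of the dump region
    public:
      PtrMarker(uintptr_t* base, size_t num_words) : _bits(num_words, false), _base(base) {}

      void mark_pointer(void* slot) {
        size_t idx = (uintptr_t*)slot - _base;   // word index of the slot
        assert(idx < _bits.size());
        _bits[idx] = true;
      }
      bool is_marked(size_t word_index) const { return _bits[word_index]; }
    };
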
@@ -397,7 +405,7 @@
         (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
       record->init(&info);
 
-      unsigned int hash = primitive_hash<Klass*>(klass);
+      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass);
       u4 delta = MetaspaceShared::object_delta_u4(record);
       _writer->add(hash, delta);
     }
@@ -436,7 +444,7 @@
   }
   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 
-  unsigned int hash = primitive_hash<Klass*>(k);
+  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
 
   // Initialize from archived data. Currently this is done only
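
Editorial note on the hash change above: primitive_hash of a raw Klass* bakes the dump-time address into the table, which breaks once the archive can map anywhere. A hedged sketch of a relocation-stable alternative (illustrative, not the actual SystemDictionaryShared implementation): hash the offset from the shared base, which is identical at dump time and run time.

    #include <cstdint>

    static unsigned int relocation_stable_hash(const void* obj, const void* shared_base) {
      uint64_t offset = (uint64_t)((const char*)obj - (const char*)shared_base);
      offset ^= offset >> 17;           // simple mixing of the offset bits
      offset *= 0x9E3779B97F4A7C15ULL;  // Fibonacci hashing multiplier
      return (unsigned int)(offset >> 32);
    }
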
@@ -606,8 +614,20 @@
   assert(orig_obj != NULL, "must be");
   assert(!is_archived_object(orig_obj), "sanity");
 
-  // java.lang.Class instances cannot be included in an archived
-  // object sub-graph.
+  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
+    // This object has injected fields that cannot be supported easily, so we disallow them for now.
+    // If you get an error here, you probably made a change in the JDK library that has added
+    // these objects that are referenced (directly or indirectly) by static fields.
+    ResourceMark rm;
+    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
+    vm_exit(1);
+  }
+
+  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
+  // them as Klass::_archived_mirror because they need to be specially restored at run time.
+  //
+  // If you get an error here, you probably made a change in the JDK library that has added a Class
+  // object that is referenced (directly or indirectly) by static fields.
   if (java_lang_Class::is_instance(orig_obj)) {
     log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
     vm_exit(1);
--- a/src/hotspot/share/memory/metaspace.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/metaspace.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -73,7 +73,6 @@
 
 volatile size_t MetaspaceGC::_capacity_until_GC = 0;
 uint MetaspaceGC::_shrink_factor = 0;
-bool MetaspaceGC::_should_concurrent_collect = false;
 
 // BlockFreelist methods
 
@@ -976,25 +975,18 @@
 #ifdef _LP64
 static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
 
-void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
+void Metaspace::set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, address cds_base) {
   assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
   // Figure out the narrow_klass_base and the narrow_klass_shift.  The
   // narrow_klass_base is the lower of the metaspace base and the cds base
   // (if cds is enabled).  The narrow_klass_shift depends on the distance
   // between the lower base and higher address.
-  address lower_base;
-  address higher_address;
-#if INCLUDE_CDS
-  if (UseSharedSpaces) {
-    higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
-                          (address)(metaspace_base + compressed_class_space_size()));
-    lower_base = MIN2(metaspace_base, cds_base);
-  } else
-#endif
-  {
-    higher_address = metaspace_base + compressed_class_space_size();
-    lower_base = metaspace_base;
-
+  address lower_base = (address)metaspace_rs.base();
+  address higher_address = (address)metaspace_rs.end();
+  if (cds_base != NULL) {
+    assert(UseSharedSpaces, "must be");
+    lower_base = MIN2(lower_base, cds_base);
+  } else {
     uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
     // If compressed class space fits in lower 32G, we don't need a base.
     if (higher_address <= (address)klass_encoding_max) {
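
Editorial note: a distilled sketch of the base/shift selection in the hunk above, with illustrative names and an assumed LogKlassAlignmentInBytes of 3. The encoding base is the lower of the metaspace and CDS bases; a zero base suffices when the range is reachable from address 0, and the shift is only needed when the span exceeds the unscaled 32-bit reach.

    #include <algorithm>
    #include <cstdint>

    struct Encoding { uint64_t base; int shift; };

    Encoding choose_narrow_klass_encoding(uint64_t metaspace_base, uint64_t metaspace_end,
                                          uint64_t cds_base /* 0 when CDS is off */) {
      const uint64_t unscaled_max = uint64_t(1) << 32;   // UnscaledClassSpaceMax
      const int      klass_shift  = 3;                   // assumed LogKlassAlignmentInBytes
      const uint64_t encoding_max = unscaled_max << klass_shift;

      uint64_t lower  = (cds_base != 0) ? std::min(metaspace_base, cds_base) : metaspace_base;
      uint64_t higher = metaspace_end;

      if (cds_base == 0 && higher <= encoding_max) {
        return { 0, higher <= unscaled_max ? 0 : klass_shift };  // zero base suffices
      }
      return { lower, (higher - lower <= unscaled_max) ? 0 : klass_shift };
    }
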
@@ -1019,21 +1011,8 @@
   AOTLoader::set_narrow_klass_shift();
 }
 
-#if INCLUDE_CDS
-// Return TRUE if the specified metaspace_base and cds_base are close enough
-// to work with compressed klass pointers.
-bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
-  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
-  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
-  address lower_base = MIN2((address)metaspace_base, cds_base);
-  address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
-                                (address)(metaspace_base + compressed_class_space_size()));
-  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
-}
-#endif
-
 // Try to allocate the metaspace at the requested addr.
-void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
+void Metaspace::allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base) {
   assert(!DumpSharedSpaces, "compress klass space is allocated by MetaspaceShared class.");
   assert(using_class_space(), "called improperly");
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
@@ -1046,14 +1025,16 @@
   // Don't use large pages for the class space.
   bool large_pages = false;
 
+  if (metaspace_rs.is_reserved()) {
+    // CDS should have already reserved the space.
+    assert(requested_addr == NULL, "not used");
+    assert(cds_base != NULL, "CDS should have already reserved the memory space");
+  } else {
+    assert(cds_base == NULL, "must be");
 #if !(defined(AARCH64) || defined(AIX))
-  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
-                                             _reserve_alignment,
-                                             large_pages,
-                                             requested_addr);
+  metaspace_rs = ReservedSpace(compressed_class_space_size(), _reserve_alignment,
+                               large_pages, requested_addr);
 #else // AARCH64
-  ReservedSpace metaspace_rs;
-
   // Our compressed klass pointers may fit nicely into the lower 32
   // bits.
   if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
@@ -1078,19 +1059,6 @@
         increment = 4*G;
       }
 
-#if INCLUDE_CDS
-      if (UseSharedSpaces
-          && ! can_use_cds_with_metaspace_addr(a, cds_base)) {
-        // We failed to find an aligned base that will reach.  Fall
-        // back to using our requested addr.
-        metaspace_rs = ReservedSpace(compressed_class_space_size(),
-                                     _reserve_alignment,
-                                     large_pages,
-                                     requested_addr);
-        break;
-      }
-#endif
-
       metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                    _reserve_alignment,
                                    large_pages,
@@ -1099,53 +1067,30 @@
         break;
     }
   }
-
 #endif // AARCH64
+  }
 
   if (!metaspace_rs.is_reserved()) {
-#if INCLUDE_CDS
-    if (UseSharedSpaces) {
-      size_t increment = align_up(1*G, _reserve_alignment);
-
-      // Keep trying to allocate the metaspace, increasing the requested_addr
-      // by 1GB each time, until we reach an address that will no longer allow
-      // use of CDS with compressed klass pointers.
-      char *addr = requested_addr;
-      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
-             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
-        addr = addr + increment;
-        metaspace_rs = ReservedSpace(compressed_class_space_size(),
-                                     _reserve_alignment, large_pages, addr);
-      }
-    }
-#endif
+    assert(cds_base == NULL, "CDS should have already reserved the memory space");
     // If no successful allocation then try to allocate the space anywhere.  If
     // that fails then OOM doom.  At this point we cannot try allocating the
     // metaspace as if UseCompressedClassPointers is off because too much
     // initialization has happened that depends on UseCompressedClassPointers.
     // So, UseCompressedClassPointers cannot be turned off at this point.
+    metaspace_rs = ReservedSpace(compressed_class_space_size(),
+                                 _reserve_alignment, large_pages);
     if (!metaspace_rs.is_reserved()) {
-      metaspace_rs = ReservedSpace(compressed_class_space_size(),
-                                   _reserve_alignment, large_pages);
-      if (!metaspace_rs.is_reserved()) {
-        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
-                                              compressed_class_space_size()));
-      }
+      vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
+                                            compressed_class_space_size()));
     }
   }
 
-  // If we got here then the metaspace got allocated.
-  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+  if (cds_base == NULL) {
+    // If we got here then the metaspace got allocated.
+    MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+  }
 
-#if INCLUDE_CDS
-  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
-  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
-    FileMapInfo::stop_sharing_and_unmap(
-        "Could not allocate metaspace at a compatible address");
-  }
-#endif
-  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
-                                  UseSharedSpaces ? (address)cds_base : 0);
+  set_narrow_klass_base_and_shift(metaspace_rs, cds_base);
 
   initialize_class_space(metaspace_rs);
 
@@ -1248,31 +1193,30 @@
 void Metaspace::global_initialize() {
   MetaspaceGC::initialize();
 
+  bool class_space_inited = false;
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
     MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
+    class_space_inited = true;
   } else if (UseSharedSpaces) {
     // If any of the archived space fails to map, UseSharedSpaces
-    // is reset to false. Fall through to the
-    // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
-    // metaspace.
+    // is reset to false.
     MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
+    class_space_inited = UseSharedSpaces;
   }
 
   if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
     vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
   }
+#endif // INCLUDE_CDS
 
-  if (!DumpSharedSpaces && !UseSharedSpaces)
-#endif // INCLUDE_CDS
-  {
 #ifdef _LP64
-    if (using_class_space()) {
-      char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
-      allocate_metaspace_compressed_klass_ptrs(base, 0);
-    }
-#endif // _LP64
+  if (using_class_space() && !class_space_inited) {
+    char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
+    ReservedSpace dummy;
+    allocate_metaspace_compressed_klass_ptrs(dummy, base, 0);
   }
+#endif
 
   // Initialize these before initializing the VirtualSpaceList
   _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
--- a/src/hotspot/share/memory/metaspace.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/metaspace.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -172,16 +172,13 @@
     assert(!_frozen, "sanity");
   }
 #ifdef _LP64
-  static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
+  static void allocate_metaspace_compressed_klass_ptrs(ReservedSpace metaspace_rs, char* requested_addr, address cds_base);
 #endif
 
  private:
 
 #ifdef _LP64
-  static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
-
-  // Returns true if can use CDS with metaspace allocated as specified address.
-  static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
+  static void set_narrow_klass_base_and_shift(ReservedSpace metaspace_rs, address cds_base);
 
   static void initialize_class_space(ReservedSpace rs);
 #endif
@@ -439,11 +436,6 @@
   // When committed memory of all metaspaces reaches this value,
   // a GC is induced and the value is increased. Size is in bytes.
   static volatile size_t _capacity_until_GC;
-
-  // For a CMS collection, signal that a concurrent collection should
-  // be started.
-  static bool _should_concurrent_collect;
-
   static uint _shrink_factor;
 
   static size_t shrink_factor() { return _shrink_factor; }
@@ -461,11 +453,6 @@
                                     bool* can_retry = NULL);
   static size_t dec_capacity_until_GC(size_t v);
 
-  static bool should_concurrent_collect() { return _should_concurrent_collect; }
-  static void set_should_concurrent_collect(bool v) {
-    _should_concurrent_collect = v;
-  }
-
   // The amount to increase the high-water-mark (_capacity_until_GC)
   static size_t delta_capacity_until_GC(size_t bytes);
 
--- a/src/hotspot/share/memory/metaspaceClosure.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/metaspaceClosure.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -75,6 +75,10 @@
     _default
   };
 
+  enum SpecialRef {
+    _method_entry_ref
+  };
+
   // class MetaspaceClosure::Ref --
   //
   // MetaspaceClosure can be viewed as a very simple type of copying garbage
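
Editorial note: the SpecialRef enum and push_special() hook added above give the closure a side channel for pointers that are not MetaspaceObj references (the method entry addresses in Method/ConstMethod), so a dump-time subclass can still mark them for relocation. A minimal sketch with simplified stand-in types:

    #include <cstdint>

    class Closure {
    public:
      enum SpecialRef { _method_entry_ref };
      struct Ref { void** addr; };

      virtual ~Closure() {}

      // The base class only validates the tag; the dump-time relocator
      // overrides this to record 'p' in the pointer bitmap.
      virtual void push_special(SpecialRef type, Ref* /*ref*/, intptr_t* /*p*/) {
        (void)type;  // assert(type == _method_entry_ref) in the real code
      }
    };
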
@@ -278,6 +282,16 @@
   template <class T> void push(T** mpp, Writability w = _default) {
     push_impl(new ObjectRef<T>(mpp, w));
   }
+
+  template <class T> void push_method_entry(T** mpp, intptr_t* p) {
+    push_special(_method_entry_ref, new ObjectRef<T>(mpp, _default), (intptr_t*)p);
+  }
+
+  // This is for tagging special pointers that are not a reference to MetaspaceObj. It's currently
+  // used to mark the method entry points in Method/ConstMethod.
+  virtual void push_special(SpecialRef type, Ref* obj, intptr_t* p) {
+    assert(type == _method_entry_ref, "only special type allowed for now");
+  }
 };
 
 // This is a special MetaspaceClosure that visits each unique MetaspaceObj once.
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -41,6 +41,8 @@
 #include "interpreter/bytecodes.hpp"
 #include "logging/log.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/archiveUtils.inline.hpp"
+#include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.inline.hpp"
 #include "memory/metaspace.hpp"
@@ -48,7 +50,6 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "memory/dynamicArchive.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceMirrorKlass.hpp"
@@ -67,7 +68,7 @@
 #include "runtime/vmThread.hpp"
 #include "runtime/vmOperations.hpp"
 #include "utilities/align.hpp"
-#include "utilities/bitMap.hpp"
+#include "utilities/bitMap.inline.hpp"
 #include "utilities/defaultStream.hpp"
 #include "utilities/hashtable.inline.hpp"
 #if INCLUDE_G1GC
@@ -82,8 +83,8 @@
 bool MetaspaceShared::_remapped_readwrite = false;
 address MetaspaceShared::_i2i_entry_code_buffers = NULL;
 size_t MetaspaceShared::_i2i_entry_code_buffers_size = 0;
-size_t MetaspaceShared::_core_spaces_size = 0;
 void* MetaspaceShared::_shared_metaspace_static_top = NULL;
+intx MetaspaceShared::_relocation_delta;
 
 // The CDS archive is divided into the following regions:
 //     mc  - misc code (the method entry trampolines)
@@ -147,9 +148,21 @@
   return p;
 }
 
+void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
+  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
+  intptr_t *p = (intptr_t*)_top;
+  char* newtop = _top + sizeof(intptr_t);
+  expand_top_to(newtop);
+  *p = n;
+  if (need_to_mark) {
+    ArchivePtrMarker::mark_pointer(p);
+  }
+}
+
 void DumpRegion::print(size_t total_bytes) const {
   tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
-                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
+                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
+                p2i(_base + MetaspaceShared::final_delta()));
 }
 
 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
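
Editorial note: append_intptr_t() above shows the write-and-mark pattern: pointer-valued words are flagged in the relocation bitmap at the moment they are serialized, so no later pass has to rediscover them. A hedged stand-alone sketch (hypothetical helper, marker passed as a callback):

    #include <cstdint>

    void append_word(char*& top, intptr_t value, bool need_to_mark,
                     void (*mark_pointer)(intptr_t*)) {
      intptr_t* slot = (intptr_t*)top;  // assumes 'top' is word-aligned
      *slot = value;
      top += sizeof(intptr_t);
      if (need_to_mark) {
        mark_pointer(slot);             // record this slot for relocation
      }
    }
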
@@ -172,14 +185,14 @@
   }
 }
 
-DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
-size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
+static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
+static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
 
 void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
   // Start with 0 committed bytes. The memory will be committed as needed by
   // MetaspaceShared::commit_shared_space_to().
   if (!_shared_vs.initialize(_shared_rs, 0)) {
-    vm_exit_during_initialization("Unable to allocate memory for shared space");
+    fatal("Unable to allocate memory for shared space");
   }
   first_space->init(&_shared_rs, (char*)first_space_bottom);
 }
@@ -209,73 +222,32 @@
   return _ro_region.allocate(num_bytes);
 }
 
-void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
-  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
-
-  // If using shared space, open the file that contains the shared space
-  // and map in the memory before initializing the rest of metaspace (so
-  // the addresses don't conflict)
-  FileMapInfo* mapinfo = new FileMapInfo(true);
-
-  // Open the shared archive file, read and validate the header. If
-  // initialization fails, shared spaces [UseSharedSpaces] are
-  // disabled and the file is closed.
-  // Map in spaces now also
-  if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) {
-    size_t cds_total = core_spaces_size();
-    address cds_address = (address)mapinfo->region_addr(0);
-    char* cds_end = (char *)align_up(cds_address + cds_total,
-                                     Metaspace::reserve_alignment());
-
-    // Mapping the dynamic archive before allocating the class space
-    cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end);
-
-#ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      // If UseCompressedClassPointers is set then allocate the metaspace area
-      // above the heap and above the CDS area (if it exists).
-      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
-      // map_heap_regions() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      mapinfo->map_heap_regions();
-    }
-    CompressedKlassPointers::set_range(CompressedClassSpaceSize);
-#endif // _LP64
+// When reserving an address range using ReservedSpace, we need an alignment that satisfies both:
+// os::vm_allocation_granularity() -- so that we can sub-divide this range into multiple mmap regions,
+//                                    while keeping the first range at offset 0 of this range.
+// Metaspace::reserve_alignment()  -- so we can pass the region to
+//                                    Metaspace::allocate_metaspace_compressed_klass_ptrs.
+size_t MetaspaceShared::reserved_space_alignment() {
+  size_t os_align = os::vm_allocation_granularity();
+  size_t ms_align = Metaspace::reserve_alignment();
+  if (os_align >= ms_align) {
+    assert(os_align % ms_align == 0, "must be a multiple");
+    return os_align;
   } else {
-    assert(!mapinfo->is_open() && !UseSharedSpaces,
-           "archive file not closed or shared spaces not disabled.");
+    assert(ms_align % os_align == 0, "must be a multiple");
+    return ms_align;
   }
 }
 
-char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces(
-        char* static_start, char* static_end) {
-  assert(UseSharedSpaces, "must be runtime");
-  char* cds_end = static_end;
-  if (!DynamicDumpSharedSpaces) {
-    address dynamic_top = DynamicArchive::map();
-    if (dynamic_top != NULL) {
-      assert(dynamic_top > (address)static_start, "Unexpected layout");
-      MetaspaceObj::expand_shared_metaspace_range(dynamic_top);
-      cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment());
-    }
-  }
-  return cds_end;
-}
-
-ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment,
-                                                  bool large, char* requested_address) {
-  if (requested_address != NULL) {
-    _shared_rs = ReservedSpace(size, alignment, large, requested_address);
-  } else {
-    _shared_rs = ReservedSpace(size, alignment, large);
-  }
-  return &_shared_rs;
+ReservedSpace MetaspaceShared::reserve_shared_space(size_t size, char* requested_address) {
+  bool large_pages = false; // Don't use large pages for the CDS archive.
+  assert(is_aligned(requested_address, reserved_space_alignment()), "must be");
+  return ReservedSpace(size, reserved_space_alignment(), large_pages, requested_address);
 }
 
 void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
   assert(DumpSharedSpaces, "should be called for dump time only");
-  const size_t reserve_alignment = Metaspace::reserve_alignment();
-  bool large_pages = false; // No large pages when dumping the CDS archive.
+  const size_t reserve_alignment = reserved_space_alignment();
   char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);
 
 #ifdef _LP64
@@ -296,15 +268,22 @@
   size_t cds_total = align_down(256*M, reserve_alignment);
 #endif
 
+  bool use_requested_base = true;
+  if (ArchiveRelocationMode == 1) {
+    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
+    use_requested_base = false;
+  }
+
   // First try to reserve the space at the specified SharedBaseAddress.
-  //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
-  reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
+  assert(!_shared_rs.is_reserved(), "must be");
+  if (use_requested_base) {
+    _shared_rs = reserve_shared_space(cds_total, shared_base);
+  }
   if (_shared_rs.is_reserved()) {
     assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
   } else {
     // Get a mmap region anywhere if the SharedBaseAddress fails.
-    //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
-    reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
+    _shared_rs = reserve_shared_space(cds_total);
   }
   if (!_shared_rs.is_reserved()) {
     vm_exit_during_initialization("Unable to reserve memory for shared space",
@@ -442,6 +421,8 @@
   assert(commit <= uncommitted, "sanity");
 
   bool result = _shared_vs.expand_by(commit, false);
+  ArchivePtrMarker::expand_ptr_end((address*)_shared_vs.high());
+
   if (!result) {
     vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                           need_committed_size));
@@ -451,6 +432,10 @@
                 commit, _shared_vs.actual_committed_size(), _shared_vs.high());
 }
 
+void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
+  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
+}
+
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.
 
@@ -469,6 +454,7 @@
   soc->do_tag(sizeof(Symbol));
 
   // Dump/restore miscellaneous metadata.
+  JavaClasses::serialize_offsets(soc);
   Universe::serialize(soc);
   soc->do_tag(--tag);
 
@@ -482,7 +468,6 @@
   HeapShared::serialize_subgraph_info_table_header(soc);
   SystemDictionaryShared::serialize_dictionary_headers(soc);
 
-  JavaClasses::serialize_offsets(soc);
   InstanceMirrorKlass::serialize_offsets(soc);
   soc->do_tag(--tag);
 
@@ -596,7 +581,7 @@
   Universe::set_void_mirror(NULL);
 }
 
-static void rewrite_nofast_bytecode(Method* method) {
+static void rewrite_nofast_bytecode(const methodHandle& method) {
   BytecodeStream bcs(method);
   while (!bcs.is_last_bytecode()) {
     Bytecodes::Code opcode = bcs.next();
@@ -620,19 +605,19 @@
 // [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
 //     at run time by RewriteBytecodes/RewriteFrequentPairs
 // [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
-static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
+static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread) {
   for (int i = 0; i < _global_klass_objects->length(); i++) {
     Klass* k = _global_klass_objects->at(i);
     if (k->is_instance_klass()) {
       InstanceKlass* ik = InstanceKlass::cast(k);
-      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik);
+      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(thread, ik);
     }
   }
 }
 
-void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(InstanceKlass* ik) {
+void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik) {
   for (int i = 0; i < ik->methods()->length(); i++) {
-    Method* m = ik->methods()->at(i);
+    methodHandle m(thread, ik->methods()->at(i));
     rewrite_nofast_bytecode(m);
     Fingerprinter fp(m);
     // The side effect of this call sets method's fingerprint field.
@@ -705,7 +690,9 @@
   // Switch the vtable pointer to point to the cloned vtable.
   static void patch(Metadata* obj) {
     assert(DumpSharedSpaces, "dump-time only");
+    assert(MetaspaceShared::is_in_output_space(obj), "must be");
     *(void**)obj = (void*)(_info->cloned_vtable());
+    ArchivePtrMarker::mark_pointer(obj);
   }
 
   static bool is_valid_shared_object(const T* obj) {
@@ -799,7 +786,8 @@
 }
 
 #define ALLOC_CPP_VTABLE_CLONE(c) \
-  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c);
+  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
+  ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);
 
 #define CLONE_CPP_VTABLE(c) \
   p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
@@ -965,7 +953,7 @@
   assert(size % sizeof(intptr_t) == 0, "bad size");
   do_tag((int)size);
   while (size > 0) {
-    _dump_region->append_intptr_t(*(intptr_t*)start);
+    _dump_region->append_intptr_t(*(intptr_t*)start, true);
     start += sizeof(intptr_t);
     size -= sizeof(intptr_t);
   }
@@ -1129,9 +1117,13 @@
                                  GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
   void dump_symbols();
   char* dump_read_only_tables();
+  void print_class_stats();
   void print_region_stats();
+  void print_bitmap_region_stats(size_t size, size_t total_size);
   void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                               const char *name, const size_t total_size);
+                               const char *name, size_t total_size);
+  void relocate_to_default_base_address(CHeapBitMap* ptrmap);
+
 public:
 
   VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
@@ -1276,6 +1268,15 @@
       ref->metaspace_pointers_do_at(&refer, new_loc);
       return true; // recurse into ref.obj()
     }
+    virtual void push_special(SpecialRef type, Ref* ref, intptr_t* p) {
+      assert(type == _method_entry_ref, "only special type allowed for now");
+      address obj = ref->obj();
+      address new_obj = get_new_loc(ref);
+      size_t offset = pointer_delta(p, obj, sizeof(u1));
+      intptr_t* new_p = (intptr_t*)(new_obj + offset);
+      assert(*p == *new_p, "must be a copy");
+      ArchivePtrMarker::mark_pointer((address*)new_p);
+    }
   };
 
   // Relocate a reference to point to its shallow copy
@@ -1284,6 +1285,7 @@
     virtual bool do_ref(Ref* ref, bool read_only) {
       if (ref->not_null()) {
         ref->update(get_new_loc(ref));
+        ArchivePtrMarker::mark_pointer(ref->addr());
       }
       return false; // Do not recurse.
     }
@@ -1440,7 +1442,71 @@
   return start;
 }
 
+void VM_PopulateDumpSharedSpace::print_class_stats() {
+  tty->print_cr("Number of classes %d", _global_klass_objects->length());
+  {
+    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
+    for (int i = 0; i < _global_klass_objects->length(); i++) {
+      Klass* k = _global_klass_objects->at(i);
+      if (k->is_instance_klass()) {
+        num_inst ++;
+      } else if (k->is_objArray_klass()) {
+        num_obj_array ++;
+      } else {
+        assert(k->is_typeArray_klass(), "sanity");
+        num_type_array ++;
+      }
+    }
+    tty->print_cr("    instance classes   = %5d", num_inst);
+    tty->print_cr("    obj array classes  = %5d", num_obj_array);
+    tty->print_cr("    type array classes = %5d", num_type_array);
+  }
+}
+
+void VM_PopulateDumpSharedSpace::relocate_to_default_base_address(CHeapBitMap* ptrmap) {
+  intx addr_delta = MetaspaceShared::final_delta();
+  if (addr_delta == 0) {
+    ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_md_region.top());
+  } else {
+    // We are not able to reserve space at Arguments::default_SharedBaseAddress() (due to ASLR).
+    // This means that the current content of the archive is based on a random
+    // address. Let's relocate all the pointers, so that it can be mapped to
+    // Arguments::default_SharedBaseAddress() without runtime relocation.
+    //
+    // Note: both the base and the dynamic archive are written with
+    // FileMapHeader::_requested_base_address == Arguments::default_SharedBaseAddress()
+
+    // Patch all pointers that are marked by ptrmap within this region,
+    // where we have just dumped all the metaspace data.
+    address patch_base = (address)SharedBaseAddress;
+    address patch_end  = (address)_md_region.top();
+    size_t size = patch_end - patch_base;
+
+    // the current value of the pointers to be patched must be within this
+    // range (i.e., must point to valid metaspace objects)
+    address valid_old_base = patch_base;
+    address valid_old_end  = patch_end;
+
+    // after patching, the pointers must point inside this range
+    // (the requested location of the archive, as mapped at runtime).
+    address valid_new_base = (address)Arguments::default_SharedBaseAddress();
+    address valid_new_end  = valid_new_base + size;
+
+    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
+                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
+                   p2i(valid_new_base), p2i(valid_new_end));
+
+    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+                                      valid_new_base, valid_new_end, addr_delta, ptrmap);
+    ptrmap->iterate(&patcher);
+    ArchivePtrMarker::compact(patcher.max_non_null_offset());
+  }
+}
+
 void VM_PopulateDumpSharedSpace::doit() {
+  CHeapBitMap ptrmap;
+  MetaspaceShared::initialize_ptr_marker(&ptrmap);
+
   // We should no longer allocate anything from the metaspace, so that:
   //
   // (1) Metaspace::allocate might trigger GC if we have run out of
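
Editorial note: relocate_to_default_base_address() above drives a patching pass via ptrmap->iterate(&patcher). A hedged sketch of that loop with simplified types: visit only bitmap-marked word slots, check each old value lies in the valid source range, and add the delta. The real SharedDataRelocator additionally tracks the highest non-null offset so ArchivePtrMarker::compact() can trim the bitmap.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    void relocate_marked(uintptr_t* region, const std::vector<bool>& marked,
                         uintptr_t valid_old_base, uintptr_t valid_old_end,
                         intptr_t addr_delta) {
      for (size_t i = 0; i < marked.size(); i++) {
        if (!marked[i]) continue;              // not a pointer slot
        uintptr_t old_val = region[i];
        if (old_val == 0) continue;            // nulls are left untouched
        assert(old_val >= valid_old_base && old_val < valid_old_end);
        region[i] = old_val + addr_delta;      // now targets the requested range
      }
    }
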
@@ -1472,28 +1538,11 @@
   CollectClassesClosure collect_classes;
   ClassLoaderDataGraph::loaded_classes_do(&collect_classes);
 
-  tty->print_cr("Number of classes %d", _global_klass_objects->length());
-  {
-    int num_type_array = 0, num_obj_array = 0, num_inst = 0;
-    for (int i = 0; i < _global_klass_objects->length(); i++) {
-      Klass* k = _global_klass_objects->at(i);
-      if (k->is_instance_klass()) {
-        num_inst ++;
-      } else if (k->is_objArray_klass()) {
-        num_obj_array ++;
-      } else {
-        assert(k->is_typeArray_klass(), "sanity");
-        num_type_array ++;
-      }
-    }
-    tty->print_cr("    instance classes   = %5d", num_inst);
-    tty->print_cr("    obj array classes  = %5d", num_obj_array);
-    tty->print_cr("    type array classes = %5d", num_type_array);
-  }
+  print_class_stats();
 
   // Ensure the ConstMethods won't be modified at run-time
   tty->print("Updating ConstMethods ... ");
-  rewrite_nofast_bytecodes_and_calculate_fingerprints();
+  rewrite_nofast_bytecodes_and_calculate_fingerprints(THREAD);
   tty->print_cr("done. ");
 
   // Remove all references outside the metadata
@@ -1520,12 +1569,6 @@
   MetaspaceShared::allocate_cpp_vtable_clones();
   _md_region.pack();
 
-  // The 4 core spaces are allocated consecutively mc->rw->ro->md, so there total size
-  // is just the spaces between the two ends.
-  size_t core_spaces_size = _md_region.end() - _mc_region.base();
-  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
-         "should already be aligned");
-
   // During patching, some virtual methods may be called, so at this point
   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate).
   MetaspaceShared::patch_cpp_vtable_pointers();
@@ -1534,6 +1577,10 @@
   // We don't want to write these addresses into the archive.
   MetaspaceShared::zero_cpp_vtable_clones_for_writing();
 
+  // Relocate the data so that it can be mapped to Arguments::default_SharedBaseAddress()
+  // without runtime relocation.
+  relocate_to_default_base_address(&ptrmap);
+
   // Create and write the archive file that maps the shared spaces.
 
   FileMapInfo* mapinfo = new FileMapInfo(true);
@@ -1542,7 +1589,6 @@
   mapinfo->set_misc_data_patching_start(vtbl_list);
   mapinfo->set_i2i_entry_code_buffers(MetaspaceShared::i2i_entry_code_buffers(),
                                       MetaspaceShared::i2i_entry_code_buffers_size());
-  mapinfo->set_core_spaces_size(core_spaces_size);
   mapinfo->open_for_write();
 
   // NOTE: md contains the trampoline code for method entries, which are patched at run time,
@@ -1552,6 +1598,8 @@
   write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
   write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
 
+  mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap());
+
   _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                         _closed_archive_heap_regions,
                                         _closed_archive_heap_oopmaps,
@@ -1563,6 +1611,7 @@
                                         MetaspaceShared::first_open_archive_heap_region,
                                         MetaspaceShared::max_open_archive_heap_region);
 
+  mapinfo->set_final_requested_base((char*)Arguments::default_SharedBaseAddress());
   mapinfo->set_header_crc(mapinfo->compute_header_crc());
   mapinfo->write_header();
   mapinfo->close();
@@ -1594,12 +1643,16 @@
 
 void VM_PopulateDumpSharedSpace::print_region_stats() {
   // Print statistics of all the regions
+  const size_t bitmap_used = ArchivePtrMarker::ptrmap()->size_in_bytes();
+  const size_t bitmap_reserved = align_up(bitmap_used, Metaspace::reserve_alignment());
   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                 _mc_region.reserved()  + _md_region.reserved() +
+                                bitmap_reserved +
                                 _total_closed_archive_region_size +
                                 _total_open_archive_region_size;
   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                              _mc_region.used()  + _md_region.used() +
+                             bitmap_used +
                              _total_closed_archive_region_size +
                              _total_open_archive_region_size;
   const double total_u_perc = percent_of(total_bytes, total_reserved);
@@ -1608,6 +1661,7 @@
   _rw_region.print(total_reserved);
   _ro_region.print(total_reserved);
   _md_region.print(total_reserved);
+  print_bitmap_region_stats(bitmap_reserved, total_reserved);
   print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
 
@@ -1615,8 +1669,13 @@
                  total_bytes, total_reserved, total_u_perc);
 }
 
+void VM_PopulateDumpSharedSpace::print_bitmap_region_stats(size_t size, size_t total_size) {
+  tty->print_cr("bm  space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
+                size, size/double(total_size)*100.0, size, p2i(NULL));
+}
+
 void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
-                                                         const char *name, const size_t total_size) {
+                                                         const char *name, size_t total_size) {
   int arr_len = heap_mem == NULL ? 0 : heap_mem->length();
   for (int i = 0; i < arr_len; i++) {
       char* start = (char*)heap_mem->at(i).start();
@@ -1636,9 +1695,13 @@
   o->set_klass(k);
 }
 
-Klass* MetaspaceShared::get_relocated_klass(Klass *k) {
+Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
   assert(DumpSharedSpaces, "sanity");
-  return ArchiveCompactor::get_relocated_klass(k);
+  k = ArchiveCompactor::get_relocated_klass(k);
+  if (is_final) {
+    k = (Klass*)(address(k) + final_delta());
+  }
+  return k;
 }
 
 class LinkSharedClassesClosure : public KlassClosure {
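
Editorial note: get_relocated_klass() above now performs a two-step address translation: first map a Klass to its copy in the dump buffer, then, if the caller wants the address as it will appear in the written archive, shift by final_delta(). A hedged sketch (the buffered copy is passed in, standing in for the ArchiveCompactor lookup):

    #include <cstdint>

    struct Klass;  // opaque; pointer arithmetic goes through char*

    Klass* relocated_klass(Klass* buffered_copy, bool is_final, intptr_t final_delta) {
      // 'buffered_copy' stands in for ArchiveCompactor::get_relocated_klass(k)
      return is_final ? (Klass*)((char*)buffered_copy + final_delta) : buffered_copy;
    }
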
@@ -1947,8 +2010,9 @@
   }
 }
 
-void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) {
-  _shared_metaspace_static_top = top;
+void MetaspaceShared::set_shared_metaspace_range(void* base, void* static_top, void* top) {
+  assert(base <= static_top && static_top <= top, "must be");
+  _shared_metaspace_static_top = static_top;
   MetaspaceObj::set_shared_metaspace_range(base, top);
 }
 
@@ -1973,49 +2037,312 @@
   }
 }
 
-// Map shared spaces at requested addresses and return if succeeded.
-bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
-  size_t image_alignment = mapinfo->alignment();
+void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
+  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");
+  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  FileMapInfo* static_mapinfo = open_static_archive();
+  FileMapInfo* dynamic_mapinfo = NULL;
+
+  if (static_mapinfo != NULL) {
+    dynamic_mapinfo = open_dynamic_archive();
+
+    // First try to map at the requested address
+    result = map_archives(static_mapinfo, dynamic_mapinfo, true);
+    if (result == MAP_ARCHIVE_MMAP_FAILURE) {
+      // Mapping has failed (probably due to ASLR). Let's map at an address chosen
+      // by the OS.
+      result = map_archives(static_mapinfo, dynamic_mapinfo, false);
+    }
+  }
+
+  if (result == MAP_ARCHIVE_SUCCESS) {
+    bool dynamic_mapped = (dynamic_mapinfo != NULL && dynamic_mapinfo->is_mapped());
+    char* cds_base = static_mapinfo->mapped_base();
+    char* cds_end =  dynamic_mapped ? dynamic_mapinfo->mapped_end() : static_mapinfo->mapped_end();
+    set_shared_metaspace_range(cds_base, static_mapinfo->mapped_end(), cds_end);
+    _relocation_delta = static_mapinfo->relocation_delta();
+    if (dynamic_mapped) {
+      FileMapInfo::set_shared_path_table(dynamic_mapinfo);
+    } else {
+      FileMapInfo::set_shared_path_table(static_mapinfo);
+    }
+  } else {
+    set_shared_metaspace_range(NULL, NULL, NULL);
+    UseSharedSpaces = false;
+    FileMapInfo::fail_continue("Unable to map shared spaces");
+    if (PrintSharedArchiveAndExit) {
+      vm_exit_during_initialization("Unable to use shared archive.");
+    }
+  }
+
+  if (static_mapinfo != NULL && !static_mapinfo->is_mapped()) {
+    delete static_mapinfo;
+  }
+  if (dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped()) {
+    delete dynamic_mapinfo;
+  }
+}
+
+FileMapInfo* MetaspaceShared::open_static_archive() {
+  FileMapInfo* mapinfo = new FileMapInfo(true);
+  if (!mapinfo->initialize()) {
+    delete(mapinfo);
+    return NULL;
+  }
+  return mapinfo;
+}
+
+FileMapInfo* MetaspaceShared::open_dynamic_archive() {
+  if (DynamicDumpSharedSpaces) {
+    return NULL;
+  }
+  if (Arguments::GetSharedDynamicArchivePath() == NULL) {
+    return NULL;
+  }
 
-#ifndef _WINDOWS
-  // Map in the shared memory and then map the regions on top of it.
-  // On Windows, don't map the memory here because it will cause the
-  // mappings of the regions to fail.
-  ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
-  if (!shared_rs.is_reserved()) return false;
-#endif
+  FileMapInfo* mapinfo = new FileMapInfo(false);
+  if (!mapinfo->initialize()) {
+    delete(mapinfo);
+    return NULL;
+  }
+  return mapinfo;
+}
+
+// use_requested_addr:
+//  true  = map at FileMapHeader::_requested_base_address
+//  false = map at an alternative address picked by the OS.
+MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
+                                               bool use_requested_addr) {
+  PRODUCT_ONLY(if (ArchiveRelocationMode == 1 && use_requested_addr) {
+      // For product build only -- this is for benchmarking the cost of doing relocation.
+      // For debug builds, the check is done in FileMapInfo::map_regions for better test coverage.
+      log_info(cds)("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
+      return MAP_ARCHIVE_MMAP_FAILURE;
+    });
+
+  if (ArchiveRelocationMode == 2 && !use_requested_addr) {
+    log_info(cds)("ArchiveRelocationMode == 2: never map archive(s) at an alternative address");
+    return MAP_ARCHIVE_MMAP_FAILURE;
+  }
+
+  if (dynamic_mapinfo != NULL) {
+    // Ensure that the OS won't be able to allocate new memory spaces between the two
+    // archives, or else it would mess up the simple comparison in MetaspaceObj::is_shared().
+    assert(static_mapinfo->mapping_end_offset() == dynamic_mapinfo->mapping_base_offset(), "no gap");
+  }
 
-  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
+  ReservedSpace main_rs, archive_space_rs, class_space_rs;
+  MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo, dynamic_mapinfo,
+                                                                 use_requested_addr, main_rs, archive_space_rs,
+                                                                 class_space_rs);
+  if (mapped_base_address == NULL) {
+    result = MAP_ARCHIVE_MMAP_FAILURE;
+  } else {
+    log_debug(cds)("Reserved archive_space_rs     [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
+                   p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
+    log_debug(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (" SIZE_FORMAT ") bytes",
+                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
+    MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
+    MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
+                                     map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
 
-  // Map each shared region
-  int regions[] = {mc, rw, ro, md};
-  size_t len = sizeof(regions)/sizeof(int);
-  char* saved_base[] = {NULL, NULL, NULL, NULL};
-  char* top = mapinfo->map_regions(regions, saved_base, len );
+    if (static_result == MAP_ARCHIVE_SUCCESS) {
+      if (dynamic_result == MAP_ARCHIVE_SUCCESS) {
+        result = MAP_ARCHIVE_SUCCESS;
+      } else if (dynamic_result == MAP_ARCHIVE_OTHER_FAILURE) {
+        assert(dynamic_mapinfo != NULL && !dynamic_mapinfo->is_mapped(), "must have failed");
+        // No need to retry mapping the dynamic archive again, as it will never succeed
+        // (bad file, etc) -- just keep the base archive.
+        log_warning(cds, dynamic)("Unable to use shared archive. The top archive failed to load: %s",
+                                  dynamic_mapinfo->full_path());
+        result = MAP_ARCHIVE_SUCCESS;
+        // TODO: we can give the unused space for the dynamic archive to class_space_rs, but there's no
+        // easy API to do that right now.
+      } else {
+        result = MAP_ARCHIVE_MMAP_FAILURE;
+      }
+    } else if (static_result == MAP_ARCHIVE_OTHER_FAILURE) {
+      result = MAP_ARCHIVE_OTHER_FAILURE;
+    } else {
+      result = MAP_ARCHIVE_MMAP_FAILURE;
+    }
+  }
 
-  if (top != NULL &&
-      (image_alignment == (size_t)os::vm_allocation_granularity()) &&
-      mapinfo->validate_shared_path_table()) {
-    // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
-    // fast checking in MetaspaceShared::is_in_shared_metaspace() and
-    // MetaspaceObj::is_shared().
-    _core_spaces_size = mapinfo->core_spaces_size();
-    set_shared_metaspace_range((void*)saved_base[0], (void*)top);
-    return true;
+  if (result == MAP_ARCHIVE_SUCCESS) {
+    if (!main_rs.is_reserved() && class_space_rs.is_reserved()) {
+      MemTracker::record_virtual_memory_type((address)class_space_rs.base(), mtClass);
+    }
+    SharedBaseAddress = (size_t)mapped_base_address;
+    LP64_ONLY({
+        if (Metaspace::using_class_space()) {
+          assert(class_space_rs.is_reserved(), "must be");
+          char* cds_base = static_mapinfo->mapped_base();
+          Metaspace::allocate_metaspace_compressed_klass_ptrs(class_space_rs, NULL, (address)cds_base);
+          // map_heap_regions() compares the current narrow oop and klass encodings
+          // with the archived ones, so it must be done after all encodings are determined.
+          static_mapinfo->map_heap_regions();
+        }
+        CompressedKlassPointers::set_range(CompressedClassSpaceSize);
+      });
+  } else {
+    unmap_archive(static_mapinfo);
+    unmap_archive(dynamic_mapinfo);
+    release_reserved_spaces(main_rs, archive_space_rs, class_space_rs);
+  }
+
+  return result;
+}
+
+char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
+                                                          FileMapInfo* dynamic_mapinfo,
+                                                          bool use_requested_addr,
+                                                          ReservedSpace& main_rs,
+                                                          ReservedSpace& archive_space_rs,
+                                                          ReservedSpace& class_space_rs) {
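+  // On 32-bit platforms there is no compressed class space, so only the
+  // archive space itself needs to be reserved.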
+  const bool use_klass_space = NOT_LP64(false) LP64_ONLY(Metaspace::using_class_space());
+  const size_t class_space_size = NOT_LP64(0) LP64_ONLY(Metaspace::compressed_class_space_size());
+
+  if (use_klass_space) {
+    assert(class_space_size > 0, "CompressedClassSpaceSize must have been validated");
+  }
+  if (use_requested_addr && !is_aligned(static_mapinfo->requested_base_address(), reserved_space_alignment())) {
+    return NULL;
+  }
+
+  // Size and requested location of the archive_space_rs (for both static and dynamic archives)
+  size_t base_offset = static_mapinfo->mapping_base_offset();
+  size_t end_offset  = (dynamic_mapinfo == NULL) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
+  assert(base_offset == 0, "must be");
+  assert(is_aligned(end_offset,  os::vm_allocation_granularity()), "must be");
+  assert(is_aligned(base_offset, os::vm_allocation_granularity()), "must be");
+
+  // In case reserved_space_alignment() != os::vm_allocation_granularity()
+  assert((size_t)os::vm_allocation_granularity() <= reserved_space_alignment(), "must be");
+  end_offset = align_up(end_offset, reserved_space_alignment());
+
+  size_t archive_space_size = end_offset - base_offset;
+
+  // Special handling for Windows because it cannot mmap into a reserved space:
+  //    use_requested_addr: We just map each region individually, and give up if any one of them fails.
+  //   !use_requested_addr: We reserve the space first, and then os::read in all the regions (instead of mmap).
+  //                        We're going to patch all the pointers anyway so there's no benefit for mmap.
+
+  if (use_requested_addr) {
+    char* archive_space_base = static_mapinfo->requested_base_address() + base_offset;
+    char* archive_space_end  = archive_space_base + archive_space_size;
+    if (!MetaspaceShared::use_windows_memory_mapping()) {
+      archive_space_rs = reserve_shared_space(archive_space_size, archive_space_base);
+      if (!archive_space_rs.is_reserved()) {
+        return NULL;
+      }
+    }
+    if (use_klass_space) {
+      // Make sure we can map the klass space immediately following the archive space
+      char* class_space_base = archive_space_end;
+      class_space_rs = reserve_shared_space(class_space_size, class_space_base);
+      if (!class_space_rs.is_reserved()) {
+        return NULL;
+      }
+    }
+    return static_mapinfo->requested_base_address();
   } else {
-    mapinfo->unmap_regions(regions, saved_base, len);
-#ifndef _WINDOWS
-    // Release the entire mapped region
-    shared_rs.release();
-#endif
-    // If -Xshare:on is specified, print out the error message and exit VM,
-    // otherwise, set UseSharedSpaces to false and continue.
-    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
-      vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
+    if (use_klass_space) {
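+      // Reserve one contiguous block and split it, so the archive regions and
+      // the class space end up adjacent in the address space.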
+      main_rs = reserve_shared_space(archive_space_size + class_space_size);
+      if (main_rs.is_reserved()) {
+        archive_space_rs = main_rs.first_part(archive_space_size, reserved_space_alignment(), /*split=*/true);
+        class_space_rs = main_rs.last_part(archive_space_size);
+      }
+    } else {
+      main_rs = reserve_shared_space(archive_space_size);
+      archive_space_rs = main_rs;
+    }
+    if (archive_space_rs.is_reserved()) {
+      return archive_space_rs.base();
     } else {
-      FLAG_SET_DEFAULT(UseSharedSpaces, false);
+      return NULL;
+    }
+  }
+}
+
+void MetaspaceShared::release_reserved_spaces(ReservedSpace& main_rs,
+                                              ReservedSpace& archive_space_rs,
+                                              ReservedSpace& class_space_rs) {
+  if (main_rs.is_reserved()) {
+    assert(main_rs.contains(archive_space_rs.base()), "must be");
+    assert(main_rs.contains(class_space_rs.base()), "must be");
+    log_debug(cds)("Released shared space (archive+classes) " INTPTR_FORMAT, p2i(main_rs.base()));
+    main_rs.release();
+  } else {
+    if (archive_space_rs.is_reserved()) {
+      log_debug(cds)("Released shared space (archive) " INTPTR_FORMAT, p2i(archive_space_rs.base()));
+      archive_space_rs.release();
+    }
+    if (class_space_rs.is_reserved()) {
+      log_debug(cds)("Released shared space (classes) " INTPTR_FORMAT, p2i(class_space_rs.base()));
+      class_space_rs.release();
     }
-    return false;
+  }
+}
+
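+// Region indices, in mapping order, for the static and the dynamic archive.
+// The dynamic archive has no md region, and its mc region is mapped last.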
+static int static_regions[]  = {MetaspaceShared::mc,
+                                MetaspaceShared::rw,
+                                MetaspaceShared::ro,
+                                MetaspaceShared::md};
+static int dynamic_regions[] = {MetaspaceShared::rw,
+                                MetaspaceShared::ro,
+                                MetaspaceShared::mc};
+static int static_regions_count  = 4;
+static int dynamic_regions_count = 3;
+
+MapArchiveResult MetaspaceShared::map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs) {
+  assert(UseSharedSpaces, "must be runtime");
+  if (mapinfo == NULL) {
+    return MAP_ARCHIVE_SUCCESS; // no error has happened -- trivially succeeded.
+  }
+
+  mapinfo->set_is_mapped(false);
+
+  if (mapinfo->alignment() != (size_t)os::vm_allocation_granularity()) {
+    log_error(cds)("Unable to map CDS archive -- os::vm_allocation_granularity() expected: " SIZE_FORMAT
+                   " actual: %d", mapinfo->alignment(), os::vm_allocation_granularity());
+    return MAP_ARCHIVE_OTHER_FAILURE;
+  }
+
+  MapArchiveResult result = mapinfo->is_static() ?
+    mapinfo->map_regions(static_regions, static_regions_count, mapped_base_address, rs) :
+    mapinfo->map_regions(dynamic_regions, dynamic_regions_count, mapped_base_address, rs);
+
+  if (result != MAP_ARCHIVE_SUCCESS) {
+    unmap_archive(mapinfo);
+    return result;
+  }
+
+  if (mapinfo->is_static()) {
+    if (!mapinfo->validate_shared_path_table()) {
+      unmap_archive(mapinfo);
+      return MAP_ARCHIVE_OTHER_FAILURE;
+    }
+  } else {
+    if (!DynamicArchive::validate(mapinfo)) {
+      unmap_archive(mapinfo);
+      return MAP_ARCHIVE_OTHER_FAILURE;
+    }
+  }
+
+  mapinfo->set_is_mapped(true);
+  return MAP_ARCHIVE_SUCCESS;
+}
+
+void MetaspaceShared::unmap_archive(FileMapInfo* mapinfo) {
+  assert(UseSharedSpaces, "must be runtime");
+  if (mapinfo != NULL) {
+    if (mapinfo->is_static()) {
+      mapinfo->unmap_regions(static_regions, static_regions_count);
+    } else {
+      mapinfo->unmap_regions(dynamic_regions, dynamic_regions_count);
+    }
+    mapinfo->set_is_mapped(false);
   }
 }
 
@@ -2023,17 +2350,15 @@
 // serialize it out to its various destinations.
 
 void MetaspaceShared::initialize_shared_spaces() {
-  FileMapInfo *mapinfo = FileMapInfo::current_info();
-  _i2i_entry_code_buffers = mapinfo->i2i_entry_code_buffers();
-  _i2i_entry_code_buffers_size = mapinfo->i2i_entry_code_buffers_size();
-  // _core_spaces_size is loaded from the shared archive immediatelly after mapping
-  assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity");
-  char* buffer = mapinfo->misc_data_patching_start();
+  FileMapInfo *static_mapinfo = FileMapInfo::current_info();
+  _i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
+  _i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
+  char* buffer = static_mapinfo->misc_data_patching_start();
   clone_cpp_vtables((intptr_t*)buffer);
 
   // Verify various attributes of the archive, plus initialize the
   // shared string/symbol tables
-  buffer = mapinfo->serialized_data_start();
+  buffer = static_mapinfo->serialized_data_start();
   intptr_t* array = (intptr_t*)buffer;
   ReadClosure rc(&array);
   serialize(&rc);
@@ -2041,17 +2366,26 @@
   // Initialize the run-time symbol table.
   SymbolTable::create_table();
 
-  mapinfo->patch_archived_heap_embedded_pointers();
+  static_mapinfo->patch_archived_heap_embedded_pointers();
 
   // Close the mapinfo file
-  mapinfo->close();
+  static_mapinfo->close();
+
+  FileMapInfo *dynamic_mapinfo = FileMapInfo::dynamic_info();
+  if (dynamic_mapinfo != NULL) {
+    intptr_t* buffer = (intptr_t*)dynamic_mapinfo->serialized_data_start();
+    ReadClosure rc(&buffer);
+    SymbolTable::serialize_shared_table_header(&rc, false);
+    SystemDictionaryShared::serialize_dictionary_headers(&rc, false);
+    dynamic_mapinfo->close();
+  }
 
   if (PrintSharedArchiveAndExit) {
     if (PrintSharedDictionary) {
       tty->print_cr("\nShared classes:\n");
       SystemDictionaryShared::print_on(tty);
     }
-    if (_archive_loading_failed) {
+    if (FileMapInfo::current_info() == NULL || _archive_loading_failed) {
       tty->print_cr("archive is invalid");
       vm_exit(1);
     } else {
@@ -2094,3 +2428,10 @@
   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                 "Please reduce the number of shared classes.");
 }
+
+// This is used to relocate the pointers so that the archive can be mapped at
+// Arguments::default_SharedBaseAddress() without runtime relocation.
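+// For example (hypothetical addresses): if the archive was dumped with
+// SharedBaseAddress == 0x810000000 while default_SharedBaseAddress() is
+// 0x800000000, final_delta() is -0x10000000; adding this delta to a dump-time
+// pointer yields the address that pointer will have when the archive is
+// mapped at the default base address.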
+intx MetaspaceShared::final_delta() {
+  return intx(Arguments::default_SharedBaseAddress())  // We want the archive to be mapped here at runtime
+       - intx(SharedBaseAddress);                      // .. but the archive is mapped here at dump time
+}
--- a/src/hotspot/share/memory/metaspaceShared.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/metaspaceShared.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -37,6 +37,13 @@
 #define MAX_SHARED_DELTA                (0x7FFFFFFF)
 
 class FileMapInfo;
+class CHeapBitMap;
+
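+// MAP_ARCHIVE_MMAP_FAILURE means mapping at the requested address failed and
+// may succeed at an alternative address; MAP_ARCHIVE_OTHER_FAILURE means the
+// archive cannot be used at all (bad file, failed validation, etc.).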
+enum MapArchiveResult {
+  MAP_ARCHIVE_SUCCESS,
+  MAP_ARCHIVE_MMAP_FAILURE,
+  MAP_ARCHIVE_OTHER_FAILURE
+};
 
 class MetaspaceSharedStats {
 public:
@@ -62,13 +69,7 @@
   char* expand_top_to(char* newtop);
   char* allocate(size_t num_bytes, size_t alignment=BytesPerWord);
 
-  void append_intptr_t(intptr_t n) {
-    assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
-    intptr_t *p = (intptr_t*)_top;
-    char* newtop = _top + sizeof(intptr_t);
-    expand_top_to(newtop);
-    *p = n;
-  }
+  void append_intptr_t(intptr_t n, bool need_to_mark = false);
 
   char* base()      const { return _base;        }
   char* top()       const { return _top;         }
@@ -117,17 +118,15 @@
   }
 
   void do_ptr(void** p) {
-    _dump_region->append_intptr_t((intptr_t)*p);
+    _dump_region->append_intptr_t((intptr_t)*p, true);
   }
 
   void do_u4(u4* p) {
-    void* ptr = (void*)(uintx(*p));
-    do_ptr(&ptr);
+    _dump_region->append_intptr_t((intptr_t)(*p));
   }
 
   void do_bool(bool *p) {
-    void* ptr = (void*)(uintx(*p));
-    do_ptr(&ptr);
+    _dump_region->append_intptr_t((intptr_t)(*p));
   }
 
   void do_tag(int tag) {
@@ -170,7 +169,7 @@
   bool reading() const { return true; }
 };
 
-#endif
+#endif // INCLUDE_CDS
 
 // Class Data Sharing Support
 class MetaspaceShared : AllStatic {
@@ -187,6 +186,7 @@
   static size_t  _i2i_entry_code_buffers_size;
   static size_t  _core_spaces_size;
   static void* _shared_metaspace_static_top;
+  static intx _relocation_delta;
  public:
   enum {
     // core archive spaces
@@ -194,11 +194,12 @@
     rw = 1,  // read-write shared space in the heap
     ro = 2,  // read-only shared space in the heap
     md = 3,  // miscellaneous data for initializing tables, etc.
-    num_core_spaces = 4, // number of non-string regions
-    num_non_heap_spaces = 4,
+    bm = 4,  // relocation bitmaps (freed after file mapping is finished)
+    num_core_region = 4,
+    num_non_heap_spaces = 5,
 
     // mapped java heap regions
-    first_closed_archive_heap_region = md + 1,
+    first_closed_archive_heap_region = bm + 1,
     max_closed_archive_heap_region = 2,
     last_closed_archive_heap_region = first_closed_archive_heap_region + max_closed_archive_heap_region - 1,
     first_open_archive_heap_region = last_closed_archive_heap_region + 1,
@@ -220,16 +221,14 @@
     CDS_ONLY(return &_shared_rs);
     NOT_CDS(return NULL);
   }
+
+  static void set_shared_rs(ReservedSpace rs) {
+    CDS_ONLY(_shared_rs = rs);
+  }
+
   static void commit_shared_space_to(char* newtop) NOT_CDS_RETURN;
-  static size_t core_spaces_size() {
-    assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
-    assert(_core_spaces_size != 0, "sanity");
-    return _core_spaces_size;
-  }
   static void initialize_dumptime_shared_and_meta_spaces() NOT_CDS_RETURN;
   static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN;
-  static char* initialize_dynamic_runtime_shared_spaces(
-                     char* static_start, char* static_end) NOT_CDS_RETURN_(NULL);
   static void post_initialize(TRAPS) NOT_CDS_RETURN;
 
   // Delta of this object from SharedBaseAddress
@@ -245,22 +244,25 @@
   static void set_archive_loading_failed() {
     _archive_loading_failed = true;
   }
+  static bool is_in_output_space(void* ptr) {
+    assert(DumpSharedSpaces, "must be");
+    return shared_rs()->contains(ptr);
+  }
+
   static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
   static void initialize_shared_spaces() NOT_CDS_RETURN;
 
   // Return true if given address is in the shared metaspace regions (i.e., excluding any
   // mapped shared heap regions.)
   static bool is_in_shared_metaspace(const void* p) {
-    // If no shared metaspace regions are mapped, MetaspceObj::_shared_metaspace_{base,top} will
-    // both be NULL and all values of p will be rejected quickly.
-    return (p < MetaspaceObj::shared_metaspace_top() && p >= MetaspaceObj::shared_metaspace_base());
+    return MetaspaceObj::is_shared((const MetaspaceObj*)p);
   }
 
   static address shared_metaspace_top() {
     return (address)MetaspaceObj::shared_metaspace_top();
   }
 
-  static void set_shared_metaspace_range(void* base, void* top) NOT_CDS_RETURN;
+  static void set_shared_metaspace_range(void* base, void* static_top, void* top) NOT_CDS_RETURN;
 
   // Return true if given address is in the shared region corresponding to the idx
   static bool is_in_shared_region(const void* p, int idx) NOT_CDS_RETURN_(false);
@@ -298,8 +300,8 @@
   static void link_and_cleanup_shared_classes(TRAPS);
 
 #if INCLUDE_CDS
-  static ReservedSpace* reserve_shared_rs(size_t size, size_t alignment,
-                                          bool large, char* requested_address);
+  static ReservedSpace reserve_shared_space(size_t size, char* requested_address = NULL);
+  static size_t reserved_space_alignment();
   static void init_shared_dump_space(DumpRegion* first_space, address first_space_bottom = NULL);
   static DumpRegion* misc_code_dump_space();
   static DumpRegion* read_write_dump_space();
@@ -307,7 +309,7 @@
   static void pack_dump_space(DumpRegion* current, DumpRegion* next,
                               ReservedSpace* rs);
 
-  static void rewrite_nofast_bytecodes_and_calculate_fingerprints(InstanceKlass* ik);
+  static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik);
 #endif
 
   // Allocate a block of memory from the "mc", "ro", or "rw" regions.
@@ -342,11 +344,35 @@
   }
   static void relocate_klass_ptr(oop o);
 
-  static Klass* get_relocated_klass(Klass *k);
+  static Klass* get_relocated_klass(Klass *k, bool is_final=false);
 
   static intptr_t* fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj);
+  static void initialize_ptr_marker(CHeapBitMap* ptrmap);
 
+  // Non-zero if the archive(s) need to be mapped at a non-default location due to ASLR.
+  static intx relocation_delta() { return _relocation_delta; }
+  static intx final_delta();
+  static bool use_windows_memory_mapping() {
+    const bool is_windows = (NOT_WINDOWS(false) WINDOWS_ONLY(true));
+    //const bool is_windows = true; // enable this to allow testing the windows mmap semantics on Linux, etc.
+    return is_windows;
+  }
 private:
   static void read_extra_data(const char* filename, TRAPS) NOT_CDS_RETURN;
+  static FileMapInfo* open_static_archive();
+  static FileMapInfo* open_dynamic_archive();
+  static MapArchiveResult map_archives(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo,
+                                       bool use_requested_addr);
+  static char* reserve_address_space_for_archives(FileMapInfo* static_mapinfo,
+                                                  FileMapInfo* dynamic_mapinfo,
+                                                  bool use_requested_addr,
+                                                  ReservedSpace& main_rs,
+                                                  ReservedSpace& archive_space_rs,
+                                                  ReservedSpace& class_space_rs);
+  static void release_reserved_spaces(ReservedSpace& main_rs,
+                                      ReservedSpace& archive_space_rs,
+                                      ReservedSpace& class_space_rs);
+  static MapArchiveResult map_archive(FileMapInfo* mapinfo, char* mapped_base_address, ReservedSpace rs);
+  static void unmap_archive(FileMapInfo* mapinfo);
 };
 #endif // SHARE_MEMORY_METASPACESHARED_HPP
--- a/src/hotspot/share/memory/universe.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/memory/universe.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -85,18 +85,24 @@
 #include "utilities/ostream.hpp"
 #include "utilities/preserveException.hpp"
 
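+// Apply 'func' to each of the primitive type mirror fields. This replaces the
+// repetitive per-mirror code that is removed below.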
+#define PRIMITIVE_MIRRORS_DO(func) \
+  func(_int_mirror)    \
+  func(_float_mirror)  \
+  func(_double_mirror) \
+  func(_byte_mirror)   \
+  func(_bool_mirror)   \
+  func(_char_mirror)   \
+  func(_long_mirror)   \
+  func(_short_mirror)  \
+  func(_void_mirror)
+
+#define DEFINE_PRIMITIVE_MIRROR(m) \
+    oop Universe::m  = NULL;
+
 // Known objects
+PRIMITIVE_MIRRORS_DO(DEFINE_PRIMITIVE_MIRROR)
 Klass* Universe::_typeArrayKlassObjs[T_LONG+1]        = { NULL /*, NULL...*/ };
 Klass* Universe::_objectArrayKlassObj                 = NULL;
-oop Universe::_int_mirror                             = NULL;
-oop Universe::_float_mirror                           = NULL;
-oop Universe::_double_mirror                          = NULL;
-oop Universe::_byte_mirror                            = NULL;
-oop Universe::_bool_mirror                            = NULL;
-oop Universe::_char_mirror                            = NULL;
-oop Universe::_long_mirror                            = NULL;
-oop Universe::_short_mirror                           = NULL;
-oop Universe::_void_mirror                            = NULL;
 oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
 oop Universe::_main_thread_group                      = NULL;
 oop Universe::_system_thread_group                    = NULL;
@@ -167,17 +173,11 @@
   }
 }
 
-void Universe::oops_do(OopClosure* f) {
+#define DO_PRIMITIVE_MIRROR(m) \
+  f->do_oop((oop*) &m);
 
-  f->do_oop((oop*) &_int_mirror);
-  f->do_oop((oop*) &_float_mirror);
-  f->do_oop((oop*) &_double_mirror);
-  f->do_oop((oop*) &_byte_mirror);
-  f->do_oop((oop*) &_bool_mirror);
-  f->do_oop((oop*) &_char_mirror);
-  f->do_oop((oop*) &_long_mirror);
-  f->do_oop((oop*) &_short_mirror);
-  f->do_oop((oop*) &_void_mirror);
+void Universe::oops_do(OopClosure* f) {
+  PRIMITIVE_MIRRORS_DO(DO_PRIMITIVE_MIRROR);
 
   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
     f->do_oop((oop*) &_mirrors[i]);
@@ -231,6 +231,13 @@
   _do_stack_walk_cache->metaspace_pointers_do(it);
 }
 
+#define ASSERT_MIRROR_NULL(m) \
+  assert(m == NULL, "archived mirrors should be NULL");
+
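+// Serialize the mirror and, if it is a non-NULL archived mirror, update the
+// native pointers embedded in the mirror object.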
+#define SERIALIZE_MIRROR(m) \
+  f->do_oop(&m); \
+  if (m != NULL) { java_lang_Class::update_archived_primitive_mirror_native_pointers(m); }
+
 // Serialize metadata and pointers to primitive type mirrors in and out of CDS archive
 void Universe::serialize(SerializeClosure* f) {
 
@@ -239,25 +246,12 @@
   }
 
   f->do_ptr((void**)&_objectArrayKlassObj);
+
 #if INCLUDE_CDS_JAVA_HEAP
-#ifdef ASSERT
-  if (DumpSharedSpaces && !HeapShared::is_heap_object_archiving_allowed()) {
-    assert(_int_mirror == NULL    && _float_mirror == NULL &&
-           _double_mirror == NULL && _byte_mirror == NULL  &&
-           _bool_mirror == NULL   && _char_mirror == NULL  &&
-           _long_mirror == NULL   && _short_mirror == NULL &&
-           _void_mirror == NULL, "mirrors should be NULL");
-  }
-#endif
-  f->do_oop(&_int_mirror);
-  f->do_oop(&_float_mirror);
-  f->do_oop(&_double_mirror);
-  f->do_oop(&_byte_mirror);
-  f->do_oop(&_bool_mirror);
-  f->do_oop(&_char_mirror);
-  f->do_oop(&_long_mirror);
-  f->do_oop(&_short_mirror);
-  f->do_oop(&_void_mirror);
+  DEBUG_ONLY(if (DumpSharedSpaces && !HeapShared::is_heap_object_archiving_allowed()) {
+      PRIMITIVE_MIRRORS_DO(ASSERT_MIRROR_NULL);
+    });
+  PRIMITIVE_MIRRORS_DO(SERIALIZE_MIRROR);
 #endif
 
   f->do_ptr((void**)&_the_array_interfaces_array);
@@ -395,13 +389,8 @@
     // so we allocate wherever, and hope that the first collection
     // moves these objects to the bottom of the old generation.
     // We can allocate directly in the permanent generation, so we do.
-    int size;
-    if (UseConcMarkSweepGC) {
-      log_warning(gc)("Using +FullGCALot with concurrent mark sweep gc will not force all objects to relocate");
-      size = FullGCALotDummies;
-    } else {
-      size = FullGCALotDummies * 2;
-    }
+    int size = FullGCALotDummies * 2;
+
     objArrayOop    naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
     objArrayHandle dummy_array(THREAD, naked_array);
     int i = 0;
@@ -424,18 +413,18 @@
   #endif
 }
 
+#define ASSERT_MIRROR_NOT_NULL(m) \
+  assert(m != NULL, "archived mirrors should not be NULL");
+
 void Universe::initialize_basic_type_mirrors(TRAPS) {
 #if INCLUDE_CDS_JAVA_HEAP
     if (UseSharedSpaces &&
         HeapShared::open_archive_heap_region_mapped() &&
         _int_mirror != NULL) {
       assert(HeapShared::is_heap_object_archiving_allowed(), "Sanity");
-      assert(_float_mirror != NULL && _double_mirror != NULL &&
-             _byte_mirror  != NULL && _byte_mirror   != NULL &&
-             _bool_mirror  != NULL && _char_mirror   != NULL &&
-             _long_mirror  != NULL && _short_mirror  != NULL &&
-             _void_mirror  != NULL, "Sanity");
+      PRIMITIVE_MIRRORS_DO(ASSERT_MIRROR_NOT_NULL);
     } else
+      // _int_mirror could be NULL if archived heap is not mapped.
 #endif
     {
       _int_mirror     =
@@ -1224,10 +1213,10 @@
       _fullgc_alot_dummy_array = NULL;
       return false;
     }
-    if (!UseConcMarkSweepGC) {
-      // Release dummy at bottom of old generation
-      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
-    }
+
+    // Release dummy at bottom of old generation
+    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
+
     // Release dummy at bottom of permanent generation
     _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
   }
--- a/src/hotspot/share/oops/constMethod.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/constMethod.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -420,6 +420,8 @@
   if (has_default_annotations()) {
       it->push(default_annotations_addr());
   }
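+  // Register the adapter trampoline so that CDS dumping can update it when
+  // this ConstMethod is copied into the archive.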
+  ConstMethod* this_ptr = this;
+  it->push_method_entry(&this_ptr, (intptr_t*)&_adapter_trampoline);
 }
 
 // Printing
--- a/src/hotspot/share/oops/cpCache.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/cpCache.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -401,7 +401,7 @@
     return;
   }
 
-  const methodHandle adapter = call_info.resolved_method();
+  Method* adapter            = call_info.resolved_method();
   const Handle appendix      = call_info.resolved_appendix();
   const bool has_appendix    = appendix.not_null();
 
@@ -419,7 +419,7 @@
                   invoke_code,
                   p2i(appendix()),
                   (has_appendix ? "" : " (unused)"),
-                  p2i(adapter()));
+                  p2i(adapter));
     adapter->print();
     if (has_appendix)  appendix()->print();
   }
@@ -451,7 +451,7 @@
     resolved_references->obj_at_put(appendix_index, appendix());
   }
 
-  release_set_f1(adapter());  // This must be the last one to set (see NOTE above)!
+  release_set_f1(adapter);  // This must be the last one to set (see NOTE above)!
 
   // The interpreter assembly code does not check byte_2,
   // but it is used by is_resolved, method_if_resolved, etc.
@@ -723,10 +723,12 @@
   bool* f2_used = NEW_RESOURCE_ARRAY(bool, length());
   memset(f2_used, 0, sizeof(bool) * length());
 
+  Thread* THREAD = Thread::current();
+
   // Find all the slots that we need to preserve f2
   for (int i = 0; i < ik->methods()->length(); i++) {
     Method* m = ik->methods()->at(i);
-    RawBytecodeStream bcs(m);
+    RawBytecodeStream bcs(methodHandle(THREAD, m));
     while (!bcs.is_last_bytecode()) {
       Bytecodes::Code opcode = bcs.raw_next();
       switch (opcode) {
--- a/src/hotspot/share/oops/fieldInfo.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/fieldInfo.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -176,7 +176,7 @@
     return (_shorts[low_packed_offset] & FIELDINFO_TAG_MASK) == FIELDINFO_TAG_OFFSET;
   }
 
-  Symbol* name(const constantPoolHandle& cp) const {
+  Symbol* name(ConstantPool* cp) const {
     int index = name_index();
     if (is_internal()) {
       return lookup_symbol(index);
@@ -184,7 +184,7 @@
     return cp->symbol_at(index);
   }
 
-  Symbol* signature(const constantPoolHandle& cp) const {
+  Symbol* signature(ConstantPool* cp) const {
     int index = signature_index();
     if (is_internal()) {
       return lookup_symbol(index);
--- a/src/hotspot/share/oops/fieldStreams.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/fieldStreams.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -79,34 +79,11 @@
     return num_fields;
   }
 
-  FieldStreamBase(Array<u2>* fields, const constantPoolHandle& constants, int start, int limit) {
-    _fields = fields;
-    _constants = constants;
-    _index = start;
-    int num_fields = init_generic_signature_start_slot();
-    if (limit < start) {
-      _limit = num_fields;
-    } else {
-      _limit = limit;
-    }
-  }
+  inline FieldStreamBase(Array<u2>* fields, ConstantPool* constants, int start, int limit);
 
-  FieldStreamBase(Array<u2>* fields, const constantPoolHandle& constants) {
-    _fields = fields;
-    _constants = constants;
-    _index = 0;
-    _limit = init_generic_signature_start_slot();
-  }
-
+  inline FieldStreamBase(Array<u2>* fields, ConstantPool* constants);
  public:
-  FieldStreamBase(InstanceKlass* klass) {
-    _fields = klass->fields();
-    _constants = klass->constants();
-    _index = 0;
-    _limit = klass->java_fields_count();
-    init_generic_signature_start_slot();
-    assert(klass == field_holder(), "");
-  }
+  inline FieldStreamBase(InstanceKlass* klass);
 
   // accessors
   int index() const                 { return _index; }
@@ -136,11 +113,11 @@
   }
 
   Symbol* name() const {
-    return field()->name(_constants);
+    return field()->name(_constants());
   }
 
   Symbol* signature() const {
-    return field()->signature(_constants);
+    return field()->signature(_constants());
   }
 
   Symbol* generic_signature() const {
@@ -242,7 +219,7 @@
 
 class AllFieldStream : public FieldStreamBase {
  public:
-  AllFieldStream(Array<u2>* fields, const constantPoolHandle& constants): FieldStreamBase(fields, constants) {}
+  AllFieldStream(Array<u2>* fields, ConstantPool* constants): FieldStreamBase(fields, constants) {}
   AllFieldStream(InstanceKlass* k):      FieldStreamBase(k->fields(), k->constants()) {}
 };
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/oops/fieldStreams.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_OOPS_FIELDSTREAMS_INLINE_HPP
+#define SHARE_OOPS_FIELDSTREAMS_INLINE_HPP
+
+#include "oops/fieldStreams.hpp"
+#include "runtime/thread.inline.hpp"
+
+FieldStreamBase::FieldStreamBase(Array<u2>* fields, ConstantPool* constants, int start, int limit) : _fields(fields),
+         _constants(constantPoolHandle(Thread::current(), constants)), _index(start) {
+  int num_fields = init_generic_signature_start_slot();
+  if (limit < start) {
+    _limit = num_fields;
+  } else {
+    _limit = limit;
+  }
+}
+
+FieldStreamBase::FieldStreamBase(Array<u2>* fields, ConstantPool* constants) : _fields(fields),
+         _constants(constantPoolHandle(Thread::current(), constants)), _index(0) {
+  _limit = init_generic_signature_start_slot();
+}
+
+FieldStreamBase::FieldStreamBase(InstanceKlass* klass) : _fields(klass->fields()),
+         _constants(constantPoolHandle(Thread::current(), klass->constants())), _index(0),
+         _limit(klass->java_fields_count()) {
+  init_generic_signature_start_slot();
+  assert(klass == field_holder(), "");
+}
+
+#endif // SHARE_OOPS_FIELDSTREAMS_INLINE_HPP
--- a/src/hotspot/share/oops/generateOopMap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/generateOopMap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1875,7 +1875,7 @@
 
 
 void GenerateOopMap::do_ldc(int bci) {
-  Bytecode_loadconstant ldc(method(), bci);
+  Bytecode_loadconstant ldc(methodHandle(Thread::current(), method()), bci);
   ConstantPool* cp  = method()->constants();
   constantTag tag = cp->tag_at(ldc.pool_index()); // idx is index in resolved_references
   BasicType       bt  = ldc.result_type();
--- a/src/hotspot/share/oops/instanceKlass.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -54,7 +54,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/constantPool.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceKlass.inline.hpp"
@@ -1577,11 +1577,30 @@
 }
 #endif
 
-static int binary_search(const Array<Method*>* methods, const Symbol* name) {
+bool InstanceKlass::_disable_method_binary_search = false;
+
+int InstanceKlass::quick_search(const Array<Method*>* methods, const Symbol* name) {
   int len = methods->length();
-  // methods are sorted, so do binary search
   int l = 0;
   int h = len - 1;
+
+  if (_disable_method_binary_search) {
+    // At the final stage of dynamic dumping, the methods array may not be sorted
+    // by ascending addresses of their names, so we can't use binary search anymore.
+    // However, methods with the same name are still laid out consecutively inside the
+    // methods array, so let's look for the first one that matches.
+    assert(DynamicDumpSharedSpaces, "must be");
+    while (l <= h) {
+      Method* m = methods->at(l);
+      if (m->name() == name) {
+        return l;
+      }
+      l++;
+    }
+    return -1;
+  }
+
+  // methods are sorted by ascending addresses of their names, so do binary search
   while (l <= h) {
     int mid = (l + h) >> 1;
     Method* m = methods->at(mid);
@@ -1733,7 +1752,7 @@
   const bool skipping_overpass = (overpass_mode == skip_overpass);
   const bool skipping_static = (static_mode == skip_static);
   const bool skipping_private = (private_mode == skip_private);
-  const int hit = binary_search(methods, name);
+  const int hit = quick_search(methods, name);
   if (hit != -1) {
     const Method* const m = methods->at(hit);
 
@@ -1784,7 +1803,7 @@
                                        const Symbol* name,
                                        int* end_ptr) {
   assert(end_ptr != NULL, "just checking");
-  int start = binary_search(methods, name);
+  int start = quick_search(methods, name);
   int end = start + 1;
   if (start != -1) {
     while (start - 1 >= 0 && (methods->at(start - 1))->name() == name) --start;
@@ -2365,6 +2384,7 @@
   _breakpoints = NULL;
   _previous_versions = NULL;
   _cached_class_file = NULL;
+  _jvmti_cached_class_field_map = NULL;
 #endif
 
   _init_thread = NULL;
@@ -2373,6 +2393,8 @@
   _oop_map_cache = NULL;
   // clear _nest_host to ensure re-load at runtime
   _nest_host = NULL;
+  _package_entry = NULL;
+  _dep_context_last_cleaned = 0;
 }
 
 void InstanceKlass::remove_java_mirror() {
--- a/src/hotspot/share/oops/instanceKlass.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -329,6 +329,8 @@
 
   friend class SystemDictionary;
 
+  static bool _disable_method_binary_search;
+
  public:
   u2 loader_type() {
     return _misc_flags & loader_type_bits();
@@ -564,6 +566,14 @@
   bool find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
   bool find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const;
 
+ private:
+  static int quick_search(const Array<Method*>* methods, const Symbol* name);
+
+ public:
+  static void disable_method_binary_search() {
+    _disable_method_binary_search = true;
+  }
+
   // find a local method (returns NULL if not found)
   Method* find_method(const Symbol* name, const Symbol* signature) const;
   static Method* find_method(const Array<Method*>* methods,
--- a/src/hotspot/share/oops/klassVtable.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/klassVtable.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -300,7 +300,7 @@
       Symbol* signature = target_method()->signature();
       assert(super_method->name() == name && super_method->signature() == signature, "vtable entry name/sig mismatch");
 #endif
-      if (supersuperklass->is_override(super_method, target_loader, target_classname, THREAD)) {
+      if (supersuperklass->is_override(methodHandle(THREAD, super_method), target_loader, target_classname, THREAD)) {
         if (log_develop_is_enabled(Trace, vtables)) {
           ResourceMark rm(THREAD);
           LogTarget(Trace, vtables) lt;
@@ -461,7 +461,7 @@
       // private methods are also never overridden
       if (!super_method->is_private() &&
           (is_default
-          || ((super_klass->is_override(super_method, target_loader, target_classname, THREAD))
+          || ((super_klass->is_override(methodHandle(THREAD, super_method), target_loader, target_classname, THREAD))
           || ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
           && ((super_klass = find_transitive_override(super_klass,
                              target_method, i, target_loader,
@@ -650,7 +650,7 @@
     // methods that have less accessibility
     if ((!super_method->is_static()) &&
        (!super_method->is_private())) {
-      if (superk->is_override(super_method, classloader, classname, THREAD)) {
+      if (superk->is_override(methodHandle(THREAD, super_method), classloader, classname, THREAD)) {
         return false;
       // else keep looking for transitive overrides
       }
@@ -1197,7 +1197,7 @@
   int ime_count = method_count_for_interface(interf);
   for (int i = 0; i < nof_methods; i++) {
     Method* m = methods->at(i);
-    methodHandle target;
+    Method* target = NULL;
     if (m->has_itable_index()) {
       // This search must match the runtime resolution, i.e. selection search for invokeinterface
       // to correctly enforce loader constraints for interface method inheritance.
@@ -1222,6 +1222,7 @@
       // if checkconstraints requested
       if (checkconstraints) {
         Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
+        InstanceKlass* method_holder = target->method_holder();
         if (method_holder_loader() != interface_loader()) {
           ResourceMark rm(THREAD);
           Symbol* failed_type_symbol =
@@ -1240,12 +1241,12 @@
                      " different Class objects for the type %s used in the signature (%s; %s)",
                      interf->class_loader_data()->loader_name_and_id(),
                      interf->external_name(),
-                     target()->method_holder()->class_loader_data()->loader_name_and_id(),
-                     target()->method_holder()->external_kind(),
-                     target()->method_holder()->external_name(),
+                     method_holder->class_loader_data()->loader_name_and_id(),
+                     method_holder->external_kind(),
+                     method_holder->external_name(),
                      failed_type_symbol->as_klass_external_name(),
                      interf->class_in_module_of_loader(false, true),
-                     target()->method_holder()->class_in_module_of_loader(false, true));
+                     method_holder->class_in_module_of_loader(false, true));
             THROW_MSG(vmSymbols::java_lang_LinkageError(), ss.as_string());
           }
         }
@@ -1254,18 +1255,18 @@
       // ime may have moved during GC so recalculate address
       int ime_num = m->itable_index();
       assert(ime_num < ime_count, "oob");
-      itableOffsetEntry::method_entry(_klass, method_table_offset)[ime_num].initialize(target());
+      itableOffsetEntry::method_entry(_klass, method_table_offset)[ime_num].initialize(target);
       if (log_develop_is_enabled(Trace, itables)) {
         ResourceMark rm(THREAD);
-        if (target() != NULL) {
+        if (target != NULL) {
           LogTarget(Trace, itables) lt;
           LogStream ls(lt);
-          char* sig = target()->name_and_sig_as_C_string();
+          char* sig = target->name_and_sig_as_C_string();
           ls.print("interface: %s, ime_num: %d, target: %s, method_holder: %s ",
                        interf->internal_name(), ime_num, sig,
-                       target()->method_holder()->internal_name());
+                       target->method_holder()->internal_name());
           ls.print("target_method flags: ");
-          target()->print_linkage_flags(&ls);
+          target->print_linkage_flags(&ls);
           ls.cr();
         }
       }
--- a/src/hotspot/share/oops/markWord.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/markWord.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -38,20 +38,11 @@
 //  --------
 //             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
 //             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
-//             size:32 ------------------------------------------>| (CMS free block)
-//             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
 //
 //  64 bits:
 //  --------
-//  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
-//  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
-//  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
-//  size:64 ----------------------------------------------------->| (CMS free block)
-//
-//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
-//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
-//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
-//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
+//  unused:25 hash:31 -->| unused_gap:1   age:4    biased_lock:1 lock:2 (normal object)
+//  JavaThread*:54 epoch:2 unused_gap:1   age:4    biased_lock:1 lock:2 (biased object)
 //
 //  - hash contains the identity hash value: largest value is
 //    31 bits, see os::random().  Also, 64-bit vm's require
@@ -82,7 +73,7 @@
 //    performed. The runtime system aligns all JavaThread* pointers to
 //    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
 //    to make room for the age bits & the epoch bits (used in support of
-//    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
+//    biased locking).
 //
 //    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
 //    [0           | epoch | age | 1 | 01]       lock is anonymously biased
@@ -136,7 +127,7 @@
   static const int biased_lock_bits               = 1;
   static const int max_hash_bits                  = BitsPerWord - age_bits - lock_bits - biased_lock_bits;
   static const int hash_bits                      = max_hash_bits > 31 ? 31 : max_hash_bits;
-  static const int cms_bits                       = LP64_ONLY(1) NOT_LP64(0);
+  static const int unused_gap_bits                = LP64_ONLY(1) NOT_LP64(0);
   static const int epoch_bits                     = 2;
 
   // The biased locking code currently requires that the age bits be
@@ -144,8 +135,8 @@
   static const int lock_shift                     = 0;
   static const int biased_lock_shift              = lock_bits;
   static const int age_shift                      = lock_bits + biased_lock_bits;
-  static const int cms_shift                      = age_shift + age_bits;
-  static const int hash_shift                     = cms_shift + cms_bits;
+  static const int unused_gap_shift               = age_shift + age_bits;
+  static const int hash_shift                     = unused_gap_shift + unused_gap_bits;
   static const int epoch_shift                    = hash_shift;
 
   static const uintptr_t lock_mask                = right_n_bits(lock_bits);
@@ -157,8 +148,6 @@
   static const uintptr_t age_mask_in_place        = age_mask << age_shift;
   static const uintptr_t epoch_mask               = right_n_bits(epoch_bits);
   static const uintptr_t epoch_mask_in_place      = epoch_mask << epoch_shift;
-  static const uintptr_t cms_mask                 = right_n_bits(cms_bits);
-  static const uintptr_t cms_mask_in_place        = cms_mask << cms_shift;
 
   static const uintptr_t hash_mask                = right_n_bits(hash_bits);
   static const uintptr_t hash_mask_in_place       = hash_mask << hash_shift;
@@ -269,12 +258,6 @@
   template <typename KlassProxy>
   inline bool must_be_preserved_for_promotion_failure(KlassProxy klass) const;
 
-  // Should this header be preserved during a scavenge where CMS is
-  // the old generation?
-  // (This is basically the same body as must_be_preserved_for_promotion_failure(),
-  // but takes the Klass* as argument instead)
-  inline bool must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;
-
   // WARNING: The following routines are used EXCLUSIVELY by
   // synchronization functions. They are not really gc safe.
   // They must get updated if markWord layout get changed.
@@ -375,42 +358,6 @@
 
   // Recover address of oop from encoded form used in mark
   inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return (void*)clear_lock_bits().value(); }
-
-  // These markWords indicate cms free chunk blocks and not objects.
-  // In 64 bit, the markWord is set to distinguish them from oops.
-  // These are defined in 32 bit mode for vmStructs.
-  const static uintptr_t cms_free_chunk_pattern  = 0x1;
-
-  // Constants for the size field.
-  enum { size_shift                = cms_shift + cms_bits,
-         size_bits                 = 35    // need for compressed oops 32G
-       };
-  // These values are too big for Win64
-  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
-                                     NOT_LP64(0);
-  const static uintptr_t size_mask_in_place =
-                                     (address_word)size_mask << size_shift;
-
-#ifdef _LP64
-  static markWord cms_free_prototype() {
-    return markWord((prototype().value() & ~cms_mask_in_place) |
-                    ((cms_free_chunk_pattern & cms_mask) << cms_shift));
-  }
-  uintptr_t cms_encoding() const {
-    return mask_bits(value() >> cms_shift, cms_mask);
-  }
-  bool is_cms_free_chunk() const {
-    return is_neutral() &&
-           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
-  }
-
-  size_t get_size() const       { return (size_t)(value() >> size_shift); }
-  static markWord set_size_and_free(size_t size) {
-    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
-    return markWord((cms_free_prototype().value() & ~size_mask_in_place) |
-                    ((size & size_mask) << size_shift));
-  }
-#endif // _LP64
 };
 
 // Support atomic operations.
--- a/src/hotspot/share/oops/markWord.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/markWord.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -68,11 +68,6 @@
   return (!is_unlocked() || !has_no_hash());
 }
 
-// Same as must_be_preserved_for_promotion_failure().
-inline bool markWord::must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const {
-  return must_be_preserved_for_promotion_failure(klass_of_obj_containing_mark);
-}
-
 inline markWord markWord::prototype_for_klass(const Klass* klass) {
   markWord prototype_header = klass->prototype_header();
   assert(prototype_header == prototype() || prototype_header.has_bias_pattern(), "corrupt prototype header");
--- a/src/hotspot/share/oops/method.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/method.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -36,6 +36,8 @@
 #include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/oopMapCache.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
@@ -330,18 +332,21 @@
   return align_metadata_size(header_size() + extra_words);
 }
 
-
 Symbol* Method::klass_name() const {
   return method_holder()->name();
 }
 
-
 void Method::metaspace_pointers_do(MetaspaceClosure* it) {
   log_trace(cds)("Iter(Method): %p", this);
 
   it->push(&_constMethod);
   it->push(&_method_data);
   it->push(&_method_counters);
+
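+  // Register this method's entry points so that CDS dumping can update them
+  // when the method is copied into the archive.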
+  Method* this_ptr = this;
+  it->push_method_entry(&this_ptr, (intptr_t*)&_i2i_entry);
+  it->push_method_entry(&this_ptr, (intptr_t*)&_from_compiled_entry);
+  it->push_method_entry(&this_ptr, (intptr_t*)&_from_interpreted_entry);
 }
 
 // Attempt to return method oop to original state.  Clear any pointers
@@ -448,11 +453,11 @@
   } else {
     method->clear_native_function();
   }
-  if (PrintJNIResolving) {
+  if (log_is_enabled(Debug, jni, resolve)) {
     ResourceMark rm(THREAD);
-    tty->print_cr("[Registering JNI native method %s.%s]",
-      method->method_holder()->external_name(),
-      method->name()->as_C_string());
+    log_debug(jni, resolve)("[Registering JNI native method %s.%s]",
+                            method->method_holder()->external_name(),
+                            method->name()->as_C_string());
   }
   return true;
 }
@@ -544,7 +549,7 @@
     return NULL;
   }
 
-  methodHandle mh(m);
+  methodHandle mh(THREAD, m);
   MethodCounters* counters = MethodCounters::allocate(mh, THREAD);
   if (HAS_PENDING_EXCEPTION) {
     CompileBroker::log_metaspace_failure();
@@ -626,7 +631,7 @@
 
 
 bool Method::compute_has_loops_flag() {
-  BytecodeStream bcs(this);
+  BytecodeStream bcs(methodHandle(Thread::current(), this));
   Bytecodes::Code bc;
 
   while ((bc = bcs.next()) >= 0) {
@@ -984,7 +989,7 @@
       set_not_c2_compilable();
   }
   CompilationPolicy::policy()->disable_compilation(this);
-  assert(!CompilationPolicy::can_be_compiled(this, comp_level), "sanity check");
+  assert(!CompilationPolicy::can_be_compiled(methodHandle(Thread::current(), this), comp_level), "sanity check");
 }
 
 bool Method::is_not_osr_compilable(int comp_level) const {
@@ -1011,7 +1016,7 @@
       set_not_c2_osr_compilable();
   }
   CompilationPolicy::policy()->disable_compilation(this);
-  assert(!CompilationPolicy::can_be_osr_compiled(this, comp_level), "sanity check");
+  assert(!CompilationPolicy::can_be_osr_compiled(methodHandle(Thread::current(), this), comp_level), "sanity check");
 }
 
 // Revert to using the interpreter and clear out the nmethod
@@ -1056,7 +1061,7 @@
   Arguments::assert_is_dumping_archive();
   // Set the values to what they should be at run time. Note that
   // this Method can no longer be executed during dump time.
-  _i2i_entry = Interpreter::entry_for_cds_method(this);
+  _i2i_entry = Interpreter::entry_for_cds_method(methodHandle(Thread::current(), this));
   _from_interpreted_entry = _i2i_entry;
 
   if (DynamicDumpSharedSpaces) {
@@ -1568,14 +1573,14 @@
   if (m->has_stackmap_table()) {
     int code_attribute_length = m->stackmap_data()->length();
     Array<u1>* stackmap_data =
-      MetadataFactory::new_array<u1>(loader_data, code_attribute_length, 0, CHECK_NULL);
+      MetadataFactory::new_array<u1>(loader_data, code_attribute_length, 0, CHECK_(methodHandle()));
     memcpy((void*)stackmap_data->adr_at(0),
            (void*)m->stackmap_data()->adr_at(0), code_attribute_length);
     newm->set_stackmap_data(stackmap_data);
   }
 
   // copy annotations over to new method
-  newcm->copy_annotations_from(loader_data, cm, CHECK_NULL);
+  newcm->copy_annotations_from(loader_data, cm, CHECK_(methodHandle()));
   return newm;
 }
 
@@ -1739,12 +1744,15 @@
 
 // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
 // default_methods also uses this without the ordering for fast find_method
-void Method::sort_methods(Array<Method*>* methods, bool set_idnums) {
+void Method::sort_methods(Array<Method*>* methods, bool set_idnums, method_comparator_func func) {
   int length = methods->length();
   if (length > 1) {
+    if (func == NULL) {
+      func = method_comparator;
+    }
     {
       NoSafepointVerifier nsv;
-      QuickSort::sort(methods->data(), length, method_comparator, /*idempotent=*/false);
+      QuickSort::sort(methods->data(), length, func, /*idempotent=*/false);
     }
     // Reset method ordering
     if (set_idnums) {
@@ -2216,6 +2224,11 @@
   }
 }
 
+jmethodID Method::jmethod_id() {
+  methodHandle mh(Thread::current(), this);
+  return method_holder()->get_jmethod_id(mh);
+}
+
 // Mark a jmethodID as free.  This is called when there is a data race in
 // InstanceKlass while creating the jmethodID cache.
 void Method::destroy_jmethod_id(ClassLoaderData* loader_data, jmethodID m) {
--- a/src/hotspot/share/oops/method.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/method.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -856,7 +856,7 @@
   static void print_jmethod_ids(const ClassLoaderData* loader_data, outputStream* out) PRODUCT_RETURN;
 
   // Get this method's jmethodID -- allocate if it doesn't exist
-  jmethodID jmethod_id()                            { return method_holder()->get_jmethod_id(this); }
+  jmethodID jmethod_id();
 
   // Lookup the jmethodID for this method.  Return NULL if not found.
   // NOTE that this function can be called from a signal handler
@@ -1006,8 +1006,10 @@
   void print_name(outputStream* st = tty)        PRODUCT_RETURN; // prints as "virtual void foo(int)"
 #endif
 
+  typedef int (*method_comparator_func)(Method* a, Method* b);
+
   // Helper routine used for method sorting
-  static void sort_methods(Array<Method*>* methods, bool set_idnums = true);
+  static void sort_methods(Array<Method*>* methods, bool set_idnums = true, method_comparator_func func = NULL);
 
   // Deallocation function for redefine classes or if an error occurs
   void deallocate_contents(ClassLoaderData* loader_data);
--- a/src/hotspot/share/oops/methodData.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/methodData.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -668,7 +668,7 @@
 }
 
 int ParametersTypeData::compute_cell_count(Method* m) {
-  if (!MethodData::profile_parameters_for_method(m)) {
+  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
     return 0;
   }
   int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
@@ -709,7 +709,7 @@
   int size = MethodData::compute_allocation_size_in_words(method);
 
   return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
-    MethodData(method(), size, THREAD);
+    MethodData(method, size, THREAD);
 }
 
 int MethodData::bytecode_cell_count(Bytecodes::Code code) {
@@ -1220,8 +1220,9 @@
 }
 
 void MethodData::initialize() {
+  Thread* thread = Thread::current();
   NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
-  ResourceMark rm;
+  ResourceMark rm(thread);
 
   init();
   set_creation_mileage(mileage_of(method()));
@@ -1231,7 +1232,7 @@
   int data_size = 0;
   int empty_bc_count = 0;  // number of bytecodes lacking data
   _data[0] = 0;  // apparently not set below.
-  BytecodeStream stream(method());
+  BytecodeStream stream(methodHandle(thread, method()));
   Bytecodes::Code c;
   bool needs_speculative_traps = false;
   while ((c = stream.next()) >= 0) {
@@ -1284,7 +1285,7 @@
 
   post_initialize(&stream);
 
-  assert(object_size == compute_allocation_size_in_bytes(methodHandle(_method)), "MethodData: computed size != initialized size");
+  assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
   set_size(object_size);
 }
 
@@ -1296,7 +1297,8 @@
 
   // Set per-method invoke- and backedge mask.
   double scale = 1.0;
-  CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
+  methodHandle mh(Thread::current(), _method);
+  CompilerOracle::has_option_value(mh, "CompileThresholdScaling", scale);
   _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
   _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
 
@@ -1313,8 +1315,8 @@
 #if INCLUDE_RTM_OPT
   _rtm_state = NoRTM; // No RTM lock eliding by default
   if (UseRTMLocking &&
-      !CompilerOracle::has_option_string(_method, "NoRTMLockEliding")) {
-    if (CompilerOracle::has_option_string(_method, "UseRTMLockEliding") || !UseRTMDeopt) {
+      !CompilerOracle::has_option_string(mh, "NoRTMLockEliding")) {
+    if (CompilerOracle::has_option_string(mh, "UseRTMLockEliding") || !UseRTMDeopt) {
       // Generate RTM lock eliding code without abort ratio calculation code.
       _rtm_state = UseRTM;
     } else if (UseRTMDeopt) {
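The methodData.cpp hunks are all instances of one conversion: look up the current thread once, then pass it explicitly wherever a metadata handle or ResourceMark is built, instead of letting each constructor call Thread::current() itself. In sketch form (names illustrative):

    Thread* thread = Thread::current();   // hoist the TLS lookup
    ResourceMark rm(thread);              // explicit-thread ResourceMark
    methodHandle mh(thread, raw_method);  // thread-explicit metadata handle
    BytecodeStream stream(mh);            // APIs now take a methodHandle
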
--- a/src/hotspot/share/oops/oop.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/oop.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -45,7 +45,6 @@
 class ScanClosure;
 class FastScanClosure;
 class FilteringClosure;
-class CMSIsAliveClosure;
 
 class PSPromotionManager;
 class ParCompactionManager;
@@ -93,9 +92,6 @@
   inline int klass_gap() const;
   inline void set_klass_gap(int z);
   static inline void set_klass_gap(HeapWord* mem, int z);
-  // For when the klass pointer is being used as a linked list "next" field.
-  inline void set_klass_to_list_ptr(oop k);
-  inline oop list_ptr_from_klass();
 
   // size of object header, aligned to platform wordSize
   static int header_size() { return sizeof(oopDesc)/HeapWordSize; }
--- a/src/hotspot/share/oops/oop.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/oops/oop.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -179,26 +179,6 @@
   set_klass_gap((HeapWord*)this, v);
 }
 
-void oopDesc::set_klass_to_list_ptr(oop k) {
-  // This is only to be used during GC, for from-space objects, so no
-  // barrier is needed.
-  if (UseCompressedClassPointers) {
-    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
-  } else {
-    _metadata._klass = (Klass*)(address)k;
-  }
-}
-
-oop oopDesc::list_ptr_from_klass() {
-  // This is only to be used during GC, for from-space objects.
-  if (UseCompressedClassPointers) {
-    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
-  } else {
-    // Special case for GC
-    return (oop)(address)_metadata._klass;
-  }
-}
-
 bool oopDesc::is_a(Klass* k) const {
   return klass()->is_subtype_of(k);
 }
@@ -244,25 +224,13 @@
       // skipping the intermediate round to HeapWordSize.
       s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);
 
-      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
+      // UseParallelGC and UseG1GC can change the length field
       // of an "old copy" of an object array in the young gen so it indicates
       // the grey portion of an already copied array. This will cause the first
       // disjunct below to fail if the two comparands are computed across such
       // a concurrent change.
-      // ParNew also runs with promotion labs (which look like int
-      // filler arrays) which are subject to changing their declared size
-      // when finally retiring a PLAB; this also can cause the first disjunct
-      // to fail for another worker thread that is concurrently walking the block
-      // offset table. Both these invariant failures are benign for their
-      // current uses; we relax the assertion checking to cover these two cases below:
-      //     is_objArray() && is_forwarded()   // covers first scenario above
-      //  || is_typeArray()                    // covers second scenario above
-      // If and when UseParallelGC uses the same obj array oop stealing/chunking
-      // technique, we will need to suitably modify the assertion.
       assert((s == klass->oop_size(this)) ||
-             (Universe::heap()->is_gc_active() &&
-              ((is_typeArray() && UseConcMarkSweepGC) ||
-               (is_objArray()  && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
+             (Universe::heap()->is_gc_active() && is_objArray() && is_forwarded() && (UseParallelGC || UseG1GC)),
              "wrong array object size");
     } else {
       // Must be zero, so bite the bullet and take the virtual call.
--- a/src/hotspot/share/opto/graphKit.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/graphKit.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1493,18 +1493,19 @@
                           bool require_atomic_access,
                           bool unaligned,
                           bool mismatched,
-                          bool unsafe) {
+                          bool unsafe,
+                          uint8_t barrier_data) {
   assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   const TypePtr* adr_type = NULL; // debug-mode-only argument
   debug_only(adr_type = C->get_adr_type(adr_idx));
   Node* mem = memory(adr_idx);
   Node* ld;
   if (require_atomic_access && bt == T_LONG) {
-    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe);
+    ld = LoadLNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
   } else if (require_atomic_access && bt == T_DOUBLE) {
-    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe);
+    ld = LoadDNode::make_atomic(ctl, mem, adr, adr_type, t, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
   } else {
-    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe);
+    ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, unaligned, mismatched, unsafe, barrier_data);
   }
   ld = _gvn.transform(ld);
   if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
--- a/src/hotspot/share/opto/graphKit.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/graphKit.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -523,27 +523,27 @@
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                   bool require_atomic_access = false, bool unaligned = false,
-                  bool mismatched = false, bool unsafe = false) {
+                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
     // This version computes alias_index from bottom_type
     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                      mo, control_dependency, require_atomic_access,
-                     unaligned, mismatched, unsafe);
+                     unaligned, mismatched, unsafe, barrier_data);
   }
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                   bool require_atomic_access = false, bool unaligned = false,
-                  bool mismatched = false, bool unsafe = false) {
+                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other make_load factory");
     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                      mo, control_dependency, require_atomic_access,
-                     unaligned, mismatched, unsafe);
+                     unaligned, mismatched, unsafe, barrier_data);
   }
   // This is the base version which is given an alias index.
   Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
                   MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                   bool require_atomic_access = false, bool unaligned = false,
-                  bool mismatched = false, bool unsafe = false);
+                  bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
 
   // Create & transform a StoreNode and store the effect into the
   // parser's memory state.
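The new barrier_data parameter defaults to 0 and is threaded unchanged from the three make_load() overloads down to LoadNode::make and the atomic long/double factories (see the memnode.cpp hunks below), letting a GC barrier set tag the loads it emits through GraphKit. A hedged sketch of a call site; any non-zero encoding of barrier_data is GC-specific and assumed here:

    // Hypothetical: kit is a GraphKit, ctl/adr already constructed.
    uint8_t barrier_bits = 0;   // 0 == no barrier data (the default)
    Node* ld = kit.make_load(ctl, adr, TypeInstPtr::BOTTOM, T_OBJECT,
                             MemNode::unordered, LoadNode::DependsOnlyOnTest,
                             /*require_atomic_access=*/ false,
                             /*unaligned=*/ false, /*mismatched=*/ false,
                             /*unsafe=*/ false, barrier_bits);
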
--- a/src/hotspot/share/opto/lcm.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/lcm.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -958,7 +958,7 @@
       ready_cnt.at_put(n->_idx, local); // Count em up
 
 #ifdef ASSERT
-      if( UseConcMarkSweepGC || UseG1GC ) {
+      if (UseG1GC) {
         if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
           // Check the precedence edges
           for (uint prec = n->req(); prec < n->len(); prec++) {
--- a/src/hotspot/share/opto/loopTransform.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/loopTransform.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -2975,16 +2975,17 @@
 }
 
 #ifdef ASSERT
-static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
-  Node *ctrl  = cl->skip_predicates();
+static CountedLoopNode* locate_pre_from_main(CountedLoopNode* main_loop) {
+  assert(!main_loop->is_main_no_pre_loop(), "Does not have a pre loop");
+  Node* ctrl = main_loop->skip_predicates();
   assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
-  Node *iffm = ctrl->in(0);
+  Node* iffm = ctrl->in(0);
   assert(iffm->Opcode() == Op_If, "");
-  Node *p_f = iffm->in(0);
+  Node* p_f = iffm->in(0);
   assert(p_f->Opcode() == Op_IfFalse, "");
-  CountedLoopEndNode *pre_end = p_f->in(0)->as_CountedLoopEnd();
-  assert(pre_end->loopnode()->is_pre_loop(), "");
-  return pre_end->loopnode();
+  CountedLoopNode* pre_loop = p_f->in(0)->as_CountedLoopEnd()->loopnode();
+  assert(pre_loop->is_pre_loop(), "No pre loop found");
+  return pre_loop;
 }
 #endif
 
@@ -3010,7 +3011,7 @@
   }
 
   CountedLoopNode* main_head = next_head->as_CountedLoop();
-  if (!main_head->is_main_loop()) {
+  if (!main_head->is_main_loop() || main_head->is_main_no_pre_loop()) {
     return;
   }
 
--- a/src/hotspot/share/opto/macro.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/macro.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1399,7 +1399,7 @@
     // other threads.
     // Other threads include java threads and JVM internal threads
     // (for example concurrent GC threads). Current concurrent GC
-    // implementation: CMS and G1 will not scan newly created object,
+    // implementation: G1 will not scan newly created objects,
     // so it's safe to skip storestore barrier when allocation does
     // not escape.
     if (!alloc->does_not_escape_thread() &&
--- a/src/hotspot/share/opto/memnode.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/memnode.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -806,7 +806,7 @@
 //----------------------------LoadNode::make-----------------------------------
 // Polymorphic factory method:
 Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo,
-                     ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
+                     ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
   Compile* C = gvn.C;
 
   // sanity check the alias category against the created node type
@@ -857,6 +857,7 @@
   if (unsafe) {
     load->set_unsafe_access();
   }
+  load->set_barrier_data(barrier_data);
   if (load->Opcode() == Op_LoadN) {
     Node* ld = gvn.transform(load);
     return new DecodeNNode(ld, ld->bottom_type()->make_ptr());
@@ -866,7 +867,7 @@
 }
 
 LoadLNode* LoadLNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
-                                  ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
+                                  ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
   bool require_atomic = true;
   LoadLNode* load = new LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, control_dependency, require_atomic);
   if (unaligned) {
@@ -878,11 +879,12 @@
   if (unsafe) {
     load->set_unsafe_access();
   }
+  load->set_barrier_data(barrier_data);
   return load;
 }
 
 LoadDNode* LoadDNode::make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo,
-                                  ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe) {
+                                  ControlDependency control_dependency, bool unaligned, bool mismatched, bool unsafe, uint8_t barrier_data) {
   bool require_atomic = true;
   LoadDNode* load = new LoadDNode(ctl, mem, adr, adr_type, rt, mo, control_dependency, require_atomic);
   if (unaligned) {
@@ -894,6 +896,7 @@
   if (unsafe) {
     load->set_unsafe_access();
   }
+  load->set_barrier_data(barrier_data);
   return load;
 }
 
--- a/src/hotspot/share/opto/memnode.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/memnode.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -228,7 +228,8 @@
   static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt,
                     MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
-                    bool unaligned = false, bool mismatched = false, bool unsafe = false);
+                    bool unaligned = false, bool mismatched = false, bool unsafe = false,
+                    uint8_t barrier_data = 0);
 
   virtual uint hash()   const;  // Check the type
 
@@ -412,7 +413,7 @@
   bool require_atomic_access() const { return _require_atomic_access; }
   static LoadLNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
-                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
+                                bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
@@ -464,7 +465,7 @@
   bool require_atomic_access() const { return _require_atomic_access; }
   static LoadDNode* make_atomic(Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                 const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest,
-                                bool unaligned = false, bool mismatched = false, bool unsafe = false);
+                                bool unaligned = false, bool mismatched = false, bool unsafe = false, uint8_t barrier_data = 0);
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const {
     LoadNode::dump_spec(st);
--- a/src/hotspot/share/opto/phaseX.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/phaseX.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -891,7 +891,10 @@
 }
 
 bool PhaseGVN::is_dominator_helper(Node *d, Node *n, bool linear_only) {
-  if (d->is_top() || n->is_top()) {
+  if (d->is_top() || (d->is_Proj() && d->in(0)->is_top())) {
+    return false;
+  }
+  if (n->is_top() || (n->is_Proj() && n->in(0)->is_top())) {
     return false;
   }
   assert(d->is_CFG() && n->is_CFG(), "must have CFG nodes");
--- a/src/hotspot/share/opto/superword.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/opto/superword.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -699,24 +699,34 @@
         // Put memory ops from remaining packs back on memops list for
         // the best alignment search.
         uint orig_msize = memops.size();
-        for (int i = 0; i < _packset.length(); i++) {
-          Node_List* p = _packset.at(i);
+        if (_packset.length() == 1 && orig_msize == 0) {
+          // If there are no remaining memory ops and only one pack, we have
+          // only one choice for the alignment.
+          Node_List* p = _packset.at(0);
+          assert(p->size() > 0, "sanity");
           MemNode* s = p->at(0)->as_Mem();
           assert(!same_velt_type(s, mem_ref), "sanity");
-          memops.push(s);
-        }
-        best_align_to_mem_ref = find_align_to_ref(memops);
-        if (best_align_to_mem_ref == NULL) {
-          if (TraceSuperWord) {
-            tty->print_cr("SuperWord::find_adjacent_refs(): best_align_to_mem_ref == NULL");
+          best_align_to_mem_ref = s;
+        } else {
+          for (int i = 0; i < _packset.length(); i++) {
+            Node_List* p = _packset.at(i);
+            MemNode* s = p->at(0)->as_Mem();
+            assert(!same_velt_type(s, mem_ref), "sanity");
+            memops.push(s);
           }
-          break;
+          best_align_to_mem_ref = find_align_to_ref(memops);
+          if (best_align_to_mem_ref == NULL) {
+            if (TraceSuperWord) {
+              tty->print_cr("SuperWord::find_adjacent_refs(): best_align_to_mem_ref == NULL");
+            }
+            break;
+          }
+          best_iv_adjustment = get_iv_adjustment(best_align_to_mem_ref);
+          NOT_PRODUCT(find_adjacent_refs_trace_1(best_align_to_mem_ref, best_iv_adjustment);)
+          // Restore list.
+          while (memops.size() > orig_msize)
+            (void)memops.pop();
         }
-        best_iv_adjustment = get_iv_adjustment(best_align_to_mem_ref);
-        NOT_PRODUCT(find_adjacent_refs_trace_1(best_align_to_mem_ref, best_iv_adjustment);)
-        // Restore list.
-        while (memops.size() > orig_msize)
-          (void)memops.pop();
       }
     } // unaligned memory accesses
 
--- a/src/hotspot/share/prims/jniCheck.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/jniCheck.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -28,6 +28,8 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/guardedMemory.hpp"
 #include "oops/instanceKlass.hpp"
@@ -2303,10 +2305,7 @@
          "Mismatched JNINativeInterface tables, check for new entries");
 
   // with -verbose:jni this message will print
-  if (PrintJNIResolving) {
-    tty->print_cr("Checked JNI functions are being used to " \
-                  "validate JNI usage");
-  }
+  log_debug(jni, resolve)("Checked JNI functions are being used to validate JNI usage");
 
   return &checked_jni_NativeInterface;
 }
--- a/src/hotspot/share/prims/jvm.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/jvm.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -48,7 +48,7 @@
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/constantPool.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
 #include "oops/objArrayKlass.hpp"
@@ -1615,7 +1615,8 @@
     for (int i = 0; i < num_params; i++) {
       MethodParametersElement* params = mh->method_parameters_start();
       int index = params[i].name_cp_index;
-      bounds_check(mh->constants(), index, CHECK_NULL);
+      constantPoolHandle cp(THREAD, mh->constants());
+      bounds_check(cp, index, CHECK_NULL);
 
       if (0 != index && !mh->constants()->tag_at(index).is_utf8()) {
         THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
--- a/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
 #include "classfile/symbolTable.hpp"
 #include "interpreter/bytecodeStream.hpp"
 #include "memory/universe.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "prims/jvmtiClassFileReconstituter.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/signature.hpp"
--- a/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiCodeBlobEvents.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -253,7 +253,7 @@
 
 
   // Generate line numbers using PcDesc and ScopeDesc info
-  methodHandle mh(nm->method());
+  methodHandle mh(Thread::current(), nm->method());
 
   if (!mh->is_native()) {
     PcDesc *pcd;
--- a/src/hotspot/share/prims/jvmtiEnv.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -809,14 +809,11 @@
     LogConfiguration::configure_stdout(level, false, LOG_TAGS(class, load));
     break;
   case JVMTI_VERBOSE_GC:
-    if (value == 0) {
-      LogConfiguration::configure_stdout(LogLevel::Off, true, LOG_TAGS(gc));
-    } else {
-      LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(gc));
-    }
+    LogConfiguration::configure_stdout(level, true, LOG_TAGS(gc));
     break;
   case JVMTI_VERBOSE_JNI:
-    PrintJNIResolving = value != 0;
+    level = value == 0 ? LogLevel::Off : LogLevel::Debug;
+    LogConfiguration::configure_stdout(level, true, LOG_TAGS(jni, resolve));
     break;
   default:
     return JVMTI_ERROR_ILLEGAL_ARGUMENT;
@@ -3155,7 +3152,7 @@
   NULL_CHECK(method_oop, JVMTI_ERROR_INVALID_METHODID);
 
   HandleMark hm;
-  methodHandle method(method_oop);
+  methodHandle method(Thread::current(), method_oop);
   jint size = (jint)method->code_size();
   jvmtiError err = allocate(size, bytecodes_ptr);
   if (err != JVMTI_ERROR_NONE) {
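Both JVMTI verbose flags now route through unified logging: JVMTI_VERBOSE_GC reuses the level computed from the requested value, and JVMTI_VERBOSE_JNI maps onto the jni+resolve tags at Debug rather than toggling the removed PrintJNIResolving global. A sketch of the agent-side call that now produces that output (jvmti is an initialized jvmtiEnv*):

    jvmti->SetVerboseFlag(JVMTI_VERBOSE_JNI, JNI_TRUE);
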
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -41,7 +41,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/constantPool.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/klassVtable.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiImpl.hpp"
@@ -3492,12 +3492,11 @@
     // cached references to old methods so it doesn't need to be
     // updated. We can simply start with the previous version(s) in
     // that case.
-    constantPoolHandle other_cp;
     ConstantPoolCache* cp_cache;
 
     if (!ik->is_being_redefined()) {
       // this klass' constant pool cache may need adjustment
-      other_cp = constantPoolHandle(ik->constants());
+      ConstantPool* other_cp = ik->constants();
       cp_cache = other_cp->cache();
       if (cp_cache != NULL) {
         cp_cache->adjust_method_entries(&trace_name_printed);
@@ -3516,13 +3515,13 @@
   }
 }
 
-void VM_RedefineClasses::update_jmethod_ids() {
+void VM_RedefineClasses::update_jmethod_ids(Thread* thread) {
   for (int j = 0; j < _matching_methods_length; ++j) {
     Method* old_method = _matching_old_methods[j];
     jmethodID jmid = old_method->find_jmethod_id_or_null();
     if (jmid != NULL) {
       // There is a jmethodID, change it to point to the new method
-      methodHandle new_method_h(_matching_new_methods[j]);
+      methodHandle new_method_h(thread, _matching_new_methods[j]);
       Method::change_method_associated_with_jmethod_id(jmid, new_method_h());
       assert(Method::resolve_jmethod_id(jmid) == _matching_new_methods[j],
              "should be replaced");
@@ -3961,7 +3960,7 @@
   _new_methods = scratch_class->methods();
   _the_class = the_class;
   compute_added_deleted_matching_methods();
-  update_jmethod_ids();
+  update_jmethod_ids(THREAD);
 
   _any_class_has_resolved_methods = the_class->has_resolved_methods() || _any_class_has_resolved_methods;
 
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -407,7 +407,7 @@
   void compute_added_deleted_matching_methods();
 
   // Change jmethodIDs to point to the new methods
-  void update_jmethod_ids();
+  void update_jmethod_ids(Thread* thread);
 
   // In addition to marking methods as old and/or obsolete, this routine
   // counts the number of methods that are EMCP (Equivalent Module Constant Pool).
--- a/src/hotspot/share/prims/methodComparator.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/methodComparator.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -52,8 +52,9 @@
 
   _old_cp = old_method->constants();
   _new_cp = new_method->constants();
-  BytecodeStream s_old(old_method);
-  BytecodeStream s_new(new_method);
+  Thread* THREAD = Thread::current();
+  BytecodeStream s_old(methodHandle(THREAD, old_method));
+  BytecodeStream s_new(methodHandle(THREAD, new_method));
   _s_old = &s_old;
   _s_new = &s_new;
   Bytecodes::Code c_old, c_new;
--- a/src/hotspot/share/prims/methodHandles.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/methodHandles.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -222,7 +222,7 @@
 
 oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
   assert(info.resolved_appendix().is_null(), "only normal methods here");
-  methodHandle m = info.resolved_method();
+  methodHandle m(Thread::current(), info.resolved_method());
   assert(m.not_null(), "null method handle");
   InstanceKlass* m_klass = m->method_holder();
   assert(m_klass != NULL, "null holder for method handle");
--- a/src/hotspot/share/prims/nativeLookup.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/nativeLookup.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -27,6 +27,8 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
@@ -378,7 +380,7 @@
       if (wrapper_method != NULL && !wrapper_method->is_native()) {
         // we found a wrapper method, use its native entry
         method->set_is_prefixed_native();
-        return lookup_entry(wrapper_method, in_base_library, THREAD);
+        return lookup_entry(methodHandle(THREAD, wrapper_method), in_base_library, THREAD);
       }
     }
   }
@@ -413,11 +415,11 @@
     method->set_native_function(entry,
       Method::native_bind_event_is_interesting);
     // -verbose:jni printing
-    if (PrintJNIResolving) {
+    if (log_is_enabled(Debug, jni, resolve)) {
       ResourceMark rm(THREAD);
-      tty->print_cr("[Dynamic-linking native method %s.%s ... JNI]",
-        method->method_holder()->external_name(),
-        method->name()->as_C_string());
+      log_debug(jni, resolve)("[Dynamic-linking native method %s.%s ... JNI]",
+                              method->method_holder()->external_name(),
+                              method->name()->as_C_string());
     }
   }
   return method->native_function();
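The dynamic-linking trace formerly guarded by PrintJNIResolving becomes a jni+resolve debug message; the log_is_enabled() check keeps the ResourceMark and name conversions off the fast path when the tag is disabled. The same output can be requested directly with

    java -Xlog:jni+resolve=debug ...

which is also what -verbose:jni and the aliased PrintJNIResolving flag expand to in the arguments.cpp hunks below.
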
--- a/src/hotspot/share/prims/stackwalk.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/stackwalk.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -156,7 +156,7 @@
                 method->external_name()));
     }
     // fill in StackFrameInfo and initialize MemberName
-    stream.fill_frame(index, frames_array, method, CHECK_0);
+    stream.fill_frame(index, frames_array, methodHandle(THREAD, method), CHECK_0);
     if (++frames_decoded >= max_nframes)  break;
   }
   return frames_decoded;
--- a/src/hotspot/share/prims/unsafe.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/unsafe.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -32,7 +32,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/typeArrayOop.inline.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/prims/whitebox.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -619,6 +619,29 @@
   THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1AuxiliaryMemoryUsage: G1 GC is not enabled");
 WB_END
 
+WB_ENTRY(jint, WB_G1ActiveMemoryNodeCount(JNIEnv* env, jobject o))
+  if (UseG1GC) {
+    G1NUMA* numa = G1NUMA::numa();
+    return (jint)numa->num_active_nodes();
+  }
+  THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1ActiveMemoryNodeCount: G1 GC is not enabled");
+WB_END
+
+WB_ENTRY(jintArray, WB_G1MemoryNodeIds(JNIEnv* env, jobject o))
+  if (UseG1GC) {
+    G1NUMA* numa = G1NUMA::numa();
+    int num_node_ids = (int)numa->num_active_nodes();
+    const int* node_ids = numa->node_ids();
+
+    typeArrayOop result = oopFactory::new_intArray(num_node_ids, CHECK_NULL);
+    for (int i = 0; i < num_node_ids; i++) {
+      result->int_at_put(i, (jint)node_ids[i]);
+    }
+    return (jintArray) JNIHandles::make_local(env, result);
+  }
+  THROW_MSG_NULL(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1MemoryNodeIds: G1 GC is not enabled");
+WB_END
+
 class OldRegionsLivenessClosure: public HeapRegionClosure {
 
  private:
@@ -1726,10 +1749,6 @@
   return (jlong) MetaspaceGC::capacity_until_GC();
 WB_END
 
-WB_ENTRY(jboolean, WB_MetaspaceShouldConcurrentCollect(JNIEnv* env, jobject wb))
-  return MetaspaceGC::should_concurrent_collect();
-WB_END
-
 WB_ENTRY(jlong, WB_MetaspaceReserveAlignment(JNIEnv* env, jobject wb))
   return (jlong)Metaspace::reserve_alignment();
 WB_END
@@ -2199,6 +2218,8 @@
   {CC"g1StartConcMarkCycle",       CC"()Z",           (void*)&WB_G1StartMarkCycle  },
   {CC"g1AuxiliaryMemoryUsage", CC"()Ljava/lang/management/MemoryUsage;",
                                                       (void*)&WB_G1AuxiliaryMemoryUsage  },
+  {CC"g1ActiveMemoryNodeCount", CC"()I",              (void*)&WB_G1ActiveMemoryNodeCount },
+  {CC"g1MemoryNodeIds",    CC"()[I",                  (void*)&WB_G1MemoryNodeIds },
   {CC"g1GetMixedGCInfo",   CC"(I)[J",                 (void*)&WB_G1GetMixedGCInfo },
 #endif // INCLUDE_G1GC
 #if INCLUDE_G1GC || INCLUDE_PARALLELGC
@@ -2309,7 +2330,6 @@
      CC"(Ljava/lang/ClassLoader;JJ)V",                (void*)&WB_FreeMetaspace },
   {CC"incMetaspaceCapacityUntilGC", CC"(J)J",         (void*)&WB_IncMetaspaceCapacityUntilGC },
   {CC"metaspaceCapacityUntilGC", CC"()J",             (void*)&WB_MetaspaceCapacityUntilGC },
-  {CC"metaspaceShouldConcurrentCollect", CC"()Z",     (void*)&WB_MetaspaceShouldConcurrentCollect },
   {CC"metaspaceReserveAlignment", CC"()J",            (void*)&WB_MetaspaceReserveAlignment },
   {CC"getCPUFeatures",     CC"()Ljava/lang/String;",  (void*)&WB_GetCPUFeatures     },
   {CC"getNMethod0",         CC"(Ljava/lang/reflect/Executable;Z)[Ljava/lang/Object;",
--- a/src/hotspot/share/runtime/arguments.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/arguments.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -85,6 +85,7 @@
 bool   Arguments::_ClipInlining                 = ClipInlining;
 intx   Arguments::_Tier3InvokeNotifyFreqLog     = Tier3InvokeNotifyFreqLog;
 intx   Arguments::_Tier4InvocationThreshold     = Tier4InvocationThreshold;
+size_t Arguments::_SharedBaseAddress            = SharedBaseAddress;
 
 bool   Arguments::_enable_preview               = false;
 
@@ -515,7 +516,6 @@
   // -------------- Deprecated Flags --------------
   // --- Non-alias flags - sorted by obsolete_in then expired_in:
   { "MaxGCMinorPauseMillis",        JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
-  { "UseConcMarkSweepGC",           JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() },
   { "MaxRAMFraction",               JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "MinRAMFraction",               JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
   { "InitialRAMFraction",           JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
@@ -544,6 +544,81 @@
   { "CompilationPolicyChoice",       JDK_Version::jdk(13),     JDK_Version::jdk(14), JDK_Version::jdk(15) },
   { "TraceNMethodInstalls",          JDK_Version::jdk(13),     JDK_Version::jdk(14), JDK_Version::jdk(15) },
   { "FailOverToOldVerifier",         JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "UseConcMarkSweepGC",            JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSAbortSemantics",                       JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSAbortablePrecleanMinWorkPerIteration", JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSBitMapYieldQuantum",                   JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSBootstrapOccupancy",                   JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSClassUnloadingEnabled",                JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSClassUnloadingMaxInterval",            JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSCleanOnEnter",                         JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSConcMarkMultiple",                     JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSConcurrentMTEnabled",                  JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSCoordinatorYieldSleepCount",           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSEdenChunksRecordAlways",               JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSExpAvgFactor",                         JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSExtrapolateSweep",                     JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSIncrementalSafetyFactor",              JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSIndexedFreeListReplenish",             JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSInitiatingOccupancyFraction",          JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSIsTooFullPercentage",                  JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSLargeCoalSurplusPercent",              JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSLargeSplitSurplusPercent",             JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSLoopWarn",                             JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSMaxAbortablePrecleanLoops",            JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSMaxAbortablePrecleanTime",             JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSOldPLABMax",                           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSOldPLABMin",                           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSOldPLABNumRefills",                    JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSOldPLABReactivityFactor",              JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSOldPLABResizeQuicker",                 JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSOldPLABToleranceFactor",               JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPLABRecordAlways",                     JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSParallelInitialMarkEnabled",           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSParallelRemarkEnabled",                JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSParallelSurvivorRemarkEnabled",        JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanDenominator",                  JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanIter",                         JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanNumerator",                    JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanRefLists1",                    JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanRefLists2",                    JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanSurvivors1",                   JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanSurvivors2",                   JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleanThreshold",                    JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrecleaningEnabled",                   JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrintChunksInDump",                    JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSPrintObjectsInDump",                   JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSRemarkVerifyVariant",                  JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSReplenishIntermediate",                JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSRescanMultiple",                       JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSSamplingGrain",                        JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSScavengeBeforeRemark",                 JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSScheduleRemarkEdenPenetration",        JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSScheduleRemarkEdenSizeThreshold",      JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSScheduleRemarkSamplingRatio",          JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSSmallCoalSurplusPercent",              JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSSmallSplitSurplusPercent",             JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSSplitIndexedFreeListBlocks",           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSTriggerRatio",                         JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSWorkQueueDrainThreshold",              JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSYield",                                JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSYieldSleepCount",                      JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMSYoungGenPerWorker",                    JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMS_FLSPadding",                          JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMS_FLSWeight",                           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMS_SweepPadding",                        JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMS_SweepTimerThresholdMillis",           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "CMS_SweepWeight",                         JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "FLSAlwaysCoalesceLarge",                  JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "FLSCoalescePolicy",                       JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "FLSLargestBlockCoalesceProximity",        JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "OldPLABWeight",                           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "ParGCDesiredObjsFromOverflowList",        JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "ParGCTrimOverflow",                       JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "ParGCUseLocalOverflow",                   JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "ResizeOldPLAB",                           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "UseCMSBestFit",                           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
+  { "UseCMSInitiatingOccupancyOnly",           JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(15) },
   { "BindGCTaskThreadsToCPUs",       JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
   { "UseGCTaskAffinity",             JDK_Version::undefined(), JDK_Version::jdk(14), JDK_Version::jdk(16) },
 
@@ -588,6 +663,7 @@
   { "TraceSafepointCleanupTime", LogLevel::Info,  true,  LOG_TAGS(safepoint, cleanup) },
   { "TraceJVMTIObjectTagging",   LogLevel::Debug, true,  LOG_TAGS(jvmti, objecttagging) },
   { "TraceRedefineClasses",      LogLevel::Info,  false, LOG_TAGS(redefine, class) },
+  { "PrintJNIResolving",         LogLevel::Debug, true,  LOG_TAGS(jni, resolve) },
   { NULL,                        LogLevel::Off,   false, LOG_TAGS(_NO_TAG) }
 };
 
@@ -2199,6 +2275,9 @@
     Arguments::_Tier4InvocationThreshold = Tier4InvocationThreshold;
   }
 
+  // CDS dumping always writes the archive at the default value of SharedBaseAddress.
+  Arguments::_SharedBaseAddress = SharedBaseAddress;
+
   // Setup flags for mixed which is the default
   set_mode_flags(_mixed);
 
@@ -2400,9 +2479,7 @@
       } else if (!strcmp(tail, ":gc")) {
         LogConfiguration::configure_stdout(LogLevel::Info, true, LOG_TAGS(gc));
       } else if (!strcmp(tail, ":jni")) {
-        if (FLAG_SET_CMDLINE(PrintJNIResolving, true) != JVMFlag::SUCCESS) {
-          return JNI_EINVAL;
-        }
+        LogConfiguration::configure_stdout(LogLevel::Debug, true, LOG_TAGS(jni, resolve));
       }
     // -da / -ea / -disableassertions / -enableassertions
     // These accept an optional class/package name separated by a colon, e.g.,
@@ -2548,16 +2625,10 @@
       }
     // -Xconcgc
     } else if (match_option(option, "-Xconcgc")) {
-      if (FLAG_SET_CMDLINE(UseConcMarkSweepGC, true) != JVMFlag::SUCCESS) {
-        return JNI_EINVAL;
-      }
-      handle_extra_cms_flags("-Xconcgc uses UseConcMarkSweepGC");
+      warning("-Xconcgc uses UseConcMarkSweepGC; support was removed for both options in 14.0");
     // -Xnoconcgc
     } else if (match_option(option, "-Xnoconcgc")) {
-      if (FLAG_SET_CMDLINE(UseConcMarkSweepGC, false) != JVMFlag::SUCCESS) {
-        return JNI_EINVAL;
-      }
-      handle_extra_cms_flags("-Xnoconcgc uses UseConcMarkSweepGC");
+      warning("-Xnoconcgc uses UseConcMarkSweepGC; support was removed for both options in 14.0");
     // -Xbatch
     } else if (match_option(option, "-Xbatch")) {
       if (FLAG_SET_CMDLINE(BackgroundCompilation, false) != JVMFlag::SUCCESS) {
@@ -3819,15 +3890,6 @@
   return true;
 }
 
-void Arguments::handle_extra_cms_flags(const char* msg) {
-  SpecialFlag flag;
-  const char *flag_name = "UseConcMarkSweepGC";
-  if (lookup_special_flag(flag_name, flag)) {
-    handle_aliases_and_deprecation(flag_name, /* print warning */ true);
-    warning("%s", msg);
-  }
-}
-
 // Parse entry point called from JNI_CreateJavaVM
 
 jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) {
@@ -4161,14 +4223,11 @@
          FLAG_SET_DEFAULT(MinHeapDeltaBytes, 64*M);
       }
     }
-    // UseNUMAInterleaving is set to ON for all collectors and
-    // platforms when UseNUMA is set to ON. NUMA-aware collectors
-    // such as the parallel collector for Linux and Solaris will
-    // interleave old gen and survivor spaces on top of NUMA
-    // allocation policy for the eden space.
-    // Non NUMA-aware collectors such as CMS, G1 and Serial-GC on
-    // all platforms and ParallelGC on Windows will interleave all
-    // of the heap spaces across NUMA nodes.
+    // UseNUMAInterleaving is set to ON for all collectors and platforms when
+    // UseNUMA is set to ON. NUMA-aware collectors will interleave old gen and
+    // survivor spaces on top of NUMA allocation policy for the eden space.
+    // Non NUMA-aware collectors will interleave all of the heap spaces across
+    // NUMA nodes.
     if (FLAG_IS_DEFAULT(UseNUMAInterleaving)) {
       FLAG_SET_ERGO(UseNUMAInterleaving, true);
     }
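Taken together, the arguments.cpp changes move the whole CMS flag family to obsolete-in-14/expired-in-15 and reduce -Xconcgc/-Xnoconcgc to plain warnings. Under these hunks a command line such as

    java -XX:+UseConcMarkSweepGC -Xconcgc -version

should start the VM on the default collector, emitting an obsolete-flag warning for UseConcMarkSweepGC plus the -Xconcgc warning added above (the exact obsolete-flag wording comes from the shared deprecation machinery, not this diff).
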
--- a/src/hotspot/share/runtime/arguments.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/arguments.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -425,8 +425,6 @@
 
   static bool handle_deprecated_print_gc_flags();
 
-  static void handle_extra_cms_flags(const char* msg);
-
   static jint parse_vm_init_args(const JavaVMInitArgs *vm_options_args,
                                  const JavaVMInitArgs *java_tool_options_args,
                                  const JavaVMInitArgs *java_options_args,
@@ -483,6 +481,7 @@
 
   static char*  SharedArchivePath;
   static char*  SharedDynamicArchivePath;
+  static size_t _SharedBaseAddress; // The default value specified in globals.hpp
   static int num_archives(const char* archive_path) NOT_CDS_RETURN_(0);
   static void extract_shared_archive_paths(const char* archive_path,
                                          char** base_archive_path,
@@ -565,7 +564,7 @@
 
   static const char* GetSharedArchivePath() { return SharedArchivePath; }
   static const char* GetSharedDynamicArchivePath() { return SharedDynamicArchivePath; }
-
+  static size_t default_SharedBaseAddress() { return _SharedBaseAddress; }
   // Java launcher properties
   static void process_sun_java_launcher_properties(JavaVMInitArgs* args);
 
--- a/src/hotspot/share/runtime/deoptimization.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/deoptimization.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -44,7 +44,7 @@
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "oops/typeArrayOop.inline.hpp"
 #include "oops/verifyOopClosure.hpp"
 #include "prims/jvmtiThreadState.hpp"
@@ -428,7 +428,7 @@
   // frame.
   bool caller_was_method_handle = false;
   if (deopt_sender.is_interpreted_frame()) {
-    methodHandle method = deopt_sender.interpreter_frame_method();
+    methodHandle method(thread, deopt_sender.interpreter_frame_method());
     Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
     if (cur.is_invokedynamic() || cur.is_invokehandle()) {
       // Method handle invokes may involve fairly arbitrary chains of
@@ -1536,7 +1536,7 @@
   assert(caller_frame.cb()->as_compiled_method_or_null() == cm, "expect top frame compiled method");
   Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
 
-  MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
+  MethodData* trap_mdo = get_method_data(thread, methodHandle(thread, cm->method()), true);
   if (trap_mdo != NULL) {
     trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
   }
@@ -1701,7 +1701,7 @@
           );
     }
 
-    methodHandle    trap_method = trap_scope->method();
+    methodHandle    trap_method(THREAD, trap_scope->method());
     int             trap_bci    = trap_scope->bci();
 #if INCLUDE_JVMCI
     jlong           speculation = thread->pending_failed_speculation();
@@ -1732,7 +1732,7 @@
     methodHandle profiled_method;
 #if INCLUDE_JVMCI
     if (nm->is_compiled_by_jvmci()) {
-      profiled_method = nm->method();
+      profiled_method = methodHandle(THREAD, nm->method());
     } else {
       profiled_method = trap_method;
     }
--- a/src/hotspot/share/runtime/fieldDescriptor.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/fieldDescriptor.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -30,7 +30,7 @@
 #include "oops/constantPool.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "oops/fieldStreams.hpp"
+#include "oops/fieldStreams.inline.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/signature.hpp"
--- a/src/hotspot/share/runtime/fieldDescriptor.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/fieldDescriptor.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -31,11 +31,11 @@
 // must be put in this file, as they require runtime/handles.inline.hpp.
 
 inline Symbol* fieldDescriptor::name() const {
-  return field()->name(_cp);
+  return field()->name(_cp());
 }
 
 inline Symbol* fieldDescriptor::signature() const {
-  return field()->signature(_cp);
+  return field()->signature(_cp());
 }
 
 inline InstanceKlass* fieldDescriptor::field_holder() const {
--- a/src/hotspot/share/runtime/flags/jvmFlag.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlag.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -447,11 +447,11 @@
     //  an eye-pleasing tabular output is created.
     //
     //  Sample output:
-    //       bool CMSScavengeBeforeRemark                  = false                                     {product} {default}
-    //      uintx CMSScheduleRemarkEdenPenetration         = 50                                        {product} {default}
-    //     size_t CMSScheduleRemarkEdenSizeThreshold       = 2097152                                   {product} {default}
-    //      uintx CMSScheduleRemarkSamplingRatio           = 5                                         {product} {default}
-    //     double CMSSmallCoalSurplusPercent               = 1.050000                                  {product} {default}
+    //       bool ThreadPriorityVerbose                    = false                                     {product} {default}
+    //      uintx ThresholdTolerance                       = 10                                        {product} {default}
+    //     size_t TLABSize                                 = 0                                         {product} {default}
+    //      uintx SurvivorRatio                            = 8                                         {product} {default}
+    //     double InitialRAMPercentage                     = 1.562500                                  {product} {default}
     //      ccstr CompileCommandFile                       = MyFile.cmd                                {product} {command line}
     //  ccstrlist CompileOnly                              = Method1
     //            CompileOnly                             += Method2                                   {product} {command line}
--- a/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/flags/jvmFlagConstraintsCompiler.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -61,7 +61,7 @@
  *    'TieredStopAtLevel = CompLevel_full_optimization' (the default value). As a result,
  *    the minimum number of compiler threads is 2.
  * 5) Non-tiered emulation mode is on. CompilationModeFlag::disable_intermediate() == true.
- *    The mininum number of threads is 2. But if CompilationModeFlag::quick_internal() == false, then it's 1.
+ *    The minimum number of threads is 2. But if CompilationModeFlag::quick_internal() == false, then it's 1.
  */
 JVMFlag::Error CICompilerCountConstraintFunc(intx value, bool verbose) {
   int min_number_of_compiler_threads = 0;
--- a/src/hotspot/share/runtime/frame.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/frame.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1164,7 +1164,7 @@
 
     // Compute the actual expression stack size
     InterpreterOopMap mask;
-    OopMapCache::compute_one_oop_map(m, bci, &mask);
+    OopMapCache::compute_one_oop_map(methodHandle(Thread::current(), m), bci, &mask);
     intptr_t* tos = NULL;
     // Report each stack element and mark as owned by this frame
     for (int e = 0; e < mask.expression_stack_size(); e++) {
--- a/src/hotspot/share/runtime/globals.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/globals.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -305,9 +305,6 @@
   notproduct(bool, TraceCodeBlobStacks, false,                              \
           "Trace stack-walk of codeblobs")                                  \
                                                                             \
-  product(bool, PrintJNIResolving, false,                                   \
-          "Used to implement -v:jni")                                       \
-                                                                            \
   notproduct(bool, PrintRewrites, false,                                    \
           "Print methods that are being rewritten")                         \
                                                                             \
@@ -1950,9 +1947,6 @@
   experimental(bool, UseCriticalCompilerThreadPriority, false,              \
           "Compiler thread(s) run at critical scheduling priority")         \
                                                                             \
-  experimental(bool, UseCriticalCMSThreadPriority, false,                   \
-          "ConcurrentMarkSweep thread runs at critical scheduling priority")\
-                                                                            \
   develop(intx, NewCodeParameter,      0,                                   \
           "Testing Only: Create a dedicated integer parameter before "      \
           "putback")                                                        \
@@ -2436,6 +2430,14 @@
   product(ccstr, ExtraSharedClassListFile, NULL,                            \
           "Extra classlist for building the CDS archive file")              \
                                                                             \
+  diagnostic(intx, ArchiveRelocationMode, 0,                                \
+           "(0) first map at preferred address, and if "                    \
+           "unsuccessful, map at alternative address (default); "           \
+           "(1) always map at alternative address; "                        \
+           "(2) always map at preferred address, and if unsuccessful, "     \
+           "do not map the archive")                                        \
+           range(0, 2)                                                      \
+                                                                            \
   experimental(size_t, ArrayAllocatorMallocLimit,                           \
           SOLARIS_ONLY(64*K) NOT_SOLARIS((size_t)-1),                       \
           "Allocation less than this value will be allocated "              \
--- a/src/hotspot/share/runtime/handles.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/handles.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -143,7 +143,6 @@
    public:                                       \
     /* Constructors */                           \
     name##Handle () : _value(NULL), _thread(NULL) {}   \
-    name##Handle (type* obj);                    \
     name##Handle (Thread* thread, type* obj);    \
                                                  \
     name##Handle (const name##Handle &h);        \
--- a/src/hotspot/share/runtime/handles.inline.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/handles.inline.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -55,14 +55,6 @@
 
 // Constructor for metadata handles
 #define DEF_METADATA_HANDLE_FN(name, type) \
-inline name##Handle::name##Handle(type* obj) : _value(obj), _thread(NULL) {       \
-  if (obj != NULL) {                                                   \
-    assert(((Metadata*)obj)->is_valid(), "obj is valid");              \
-    _thread = Thread::current();                                       \
-    assert (_thread->is_in_stack((address)this), "not on stack?");     \
-    _thread->metadata_handles()->push((Metadata*)obj);                 \
-  }                                                                    \
-}                                                                      \
 inline name##Handle::name##Handle(Thread* thread, type* obj) : _value(obj), _thread(thread) { \
   if (obj != NULL) {                                                   \
     assert(((Metadata*)obj)->is_valid(), "obj is valid");              \
--- a/src/hotspot/share/runtime/java.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/java.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -733,6 +733,7 @@
   return (e > o) ? 1 : ((e == o) ? 0 : -1);
 }
 
+/* See JEP 223 */
 void JDK_Version::to_string(char* buffer, size_t buflen) const {
   assert(buffer && buflen > 0, "call with useful buffer");
   size_t index = 0;
@@ -744,13 +745,12 @@
         &buffer[index], buflen - index, "%d.%d", _major, _minor);
     if (rc == -1) return;
     index += rc;
-    if (_security > 0) {
-      rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _security);
+    if (_patch > 0) {
+      rc = jio_snprintf(&buffer[index], buflen - index, ".%d.%d", _security, _patch);
       if (rc == -1) return;
       index += rc;
-    }
-    if (_patch > 0) {
-      rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _patch);
+    } else if (_security > 0) {
+      rc = jio_snprintf(&buffer[index], buflen - index, ".%d", _security);
       if (rc == -1) return;
       index += rc;
     }
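
The reordering above implements the JEP 223 rule that a non-zero $PATCH forces the $SECURITY component to be emitted even when it is zero. A minimal Java-side sketch of the same rule via the public Runtime.Version API (illustrative only, not part of this change):

    Runtime.Version v = Runtime.Version.parse("11.0.0.1");
    System.out.println(v.update()); // 0 -- security/update is kept because patch > 0
    System.out.println(v.patch());  // 1
    System.out.println(v);          // 11.0.0.1
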
--- a/src/hotspot/share/runtime/javaCalls.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/javaCalls.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -186,7 +186,7 @@
   LinkInfo link_info(spec_klass, name, signature);
   LinkResolver::resolve_virtual_call(
           callinfo, receiver, recvrKlass, link_info, true, CHECK);
-  methodHandle method = callinfo.selected_method();
+  methodHandle method(THREAD, callinfo.selected_method());
   assert(method.not_null(), "should have thrown exception");
 
   // Invoke the method
@@ -222,7 +222,7 @@
   CallInfo callinfo;
   LinkInfo link_info(klass, name, signature);
   LinkResolver::resolve_special_call(callinfo, args->receiver(), link_info, CHECK);
-  methodHandle method = callinfo.selected_method();
+  methodHandle method(THREAD, callinfo.selected_method());
   assert(method.not_null(), "should have thrown exception");
 
   // Invoke the method
@@ -257,7 +257,7 @@
   CallInfo callinfo;
   LinkInfo link_info(klass, name, signature);
   LinkResolver::resolve_static_call(callinfo, link_info, true, CHECK);
-  methodHandle method = callinfo.selected_method();
+  methodHandle method(THREAD, callinfo.selected_method());
   assert(method.not_null(), "should have thrown exception");
 
   // Invoke the method
--- a/src/hotspot/share/runtime/mutexLocker.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/mutexLocker.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -72,6 +72,7 @@
 Monitor* CGC_lock                     = NULL;
 Monitor* STS_lock                     = NULL;
 Monitor* FullGCCount_lock             = NULL;
+Monitor* G1OldGCCount_lock            = NULL;
 Monitor* DirtyCardQ_CBL_mon           = NULL;
 Mutex*   Shared_DirtyCardQ_lock       = NULL;
 Mutex*   MarkStackFreeList_lock       = NULL;
@@ -203,6 +204,8 @@
 
   def(FullGCCount_lock             , PaddedMonitor, leaf,        true,  _safepoint_check_never);      // in support of ExplicitGCInvokesConcurrent
   if (UseG1GC) {
+    def(G1OldGCCount_lock          , PaddedMonitor, leaf,        true,  _safepoint_check_always);
+
     def(DirtyCardQ_CBL_mon         , PaddedMonitor, access,      true,  _safepoint_check_never);
     def(Shared_DirtyCardQ_lock     , PaddedMutex  , access + 1,  true,  _safepoint_check_never);
 
@@ -263,10 +266,6 @@
   def(PerfDataMemAlloc_lock        , PaddedMutex  , leaf,        true,  _safepoint_check_always); // used for allocating PerfData memory for performance data
   def(PerfDataManager_lock         , PaddedMutex  , leaf,        true,  _safepoint_check_always); // used for synchronized access to PerfDataManager resources
 
-  // CMS_modUnionTable_lock                   leaf
-  // CMS_bitMap_lock                          leaf 1
-  // CMS_freeList_lock                        leaf 2
-
   def(Threads_lock                 , PaddedMonitor, barrier,     true,  _safepoint_check_always);  // Used for safepoint protocol.
   def(NonJavaThreadsList_lock      , PaddedMutex,   leaf,        true,  _safepoint_check_never);
   def(NonJavaThreadsListSync_lock  , PaddedMutex,   leaf,        true,  _safepoint_check_never);
--- a/src/hotspot/share/runtime/mutexLocker.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/mutexLocker.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -68,6 +68,7 @@
                                                  // fore- & background GC threads.
 extern Monitor* STS_lock;                        // used for joining/leaving SuspendibleThreadSet.
 extern Monitor* FullGCCount_lock;                // in support of "concurrent" full gc
+extern Monitor* G1OldGCCount_lock;               // in support of "concurrent" full gc
 extern Monitor* DirtyCardQ_CBL_mon;              // Protects dirty card Q
                                                  // completed buffer queue.
 extern Mutex*   Shared_DirtyCardQ_lock;          // Lock protecting dirty card
--- a/src/hotspot/share/runtime/os.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/os.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -374,6 +374,7 @@
   static size_t numa_get_leaf_groups(int *ids, size_t size);
   static bool   numa_topology_changed();
   static int    numa_get_group_id();
+  static int    numa_get_group_id_for_address(const void* address);
 
   // Page manipulation
   struct page_info {
--- a/src/hotspot/share/runtime/reflection.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/reflection.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -978,7 +978,7 @@
                                        LinkInfo(klass, name, signature),
                                        true,
                                        CHECK_(methodHandle()));
-  return info.selected_method();
+  return methodHandle(THREAD, info.selected_method());
 }
 
 // Conversion
--- a/src/hotspot/share/runtime/sharedRuntime.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1022,7 +1022,7 @@
   return find_callee_info_helper(thread, vfst, bc, callinfo, THREAD);
 }
 
-methodHandle SharedRuntime::extract_attached_method(vframeStream& vfst) {
+Method* SharedRuntime::extract_attached_method(vframeStream& vfst) {
   CompiledMethod* caller = vfst.nm();
 
   nmethodLocker caller_lock(caller);
@@ -1055,9 +1055,9 @@
   int bytecode_index = bytecode.index();
   bc = bytecode.invoke_code();
 
-  methodHandle attached_method = extract_attached_method(vfst);
+  methodHandle attached_method(THREAD, extract_attached_method(vfst));
   if (attached_method.not_null()) {
-    methodHandle callee = bytecode.static_target(CHECK_NH);
+    Method* callee = bytecode.static_target(CHECK_NH);
     vmIntrinsics::ID id = callee->intrinsic_id();
     // When VM replaces MH.invokeBasic/linkTo* call with a direct/virtual call,
     // it attaches statically resolved method to the call site.
@@ -1105,8 +1105,8 @@
     frame callerFrame = stubFrame.sender(&reg_map2);
 
     if (attached_method.is_null()) {
-      methodHandle callee = bytecode.static_target(CHECK_NH);
-      if (callee.is_null()) {
+      Method* callee = bytecode.static_target(CHECK_NH);
+      if (callee == NULL) {
         THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
       }
     }
@@ -1144,7 +1144,6 @@
       rk = constants->klass_ref_at(bytecode_index, CHECK_NH);
     }
     Klass* static_receiver_klass = rk;
-    methodHandle callee = callinfo.selected_method();
     assert(receiver_klass->is_subtype_of(static_receiver_klass),
            "actual receiver must be subclass of static receiver klass");
     if (receiver_klass->is_instance_klass()) {
@@ -1182,7 +1181,7 @@
     Bytecodes::Code bc;
     CallInfo callinfo;
     find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
-    callee_method = callinfo.selected_method();
+    callee_method = methodHandle(THREAD, callinfo.selected_method());
   }
   assert(callee_method()->is_method(), "must be");
   return callee_method;
@@ -1325,7 +1324,7 @@
   Bytecodes::Code invoke_code = Bytecodes::_illegal;
   Handle receiver = find_callee_info(thread, invoke_code,
                                      call_info, CHECK_(methodHandle()));
-  methodHandle callee_method = call_info.selected_method();
+  methodHandle callee_method(THREAD, call_info.selected_method());
 
   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
          (!is_virtual && invoke_code == Bytecodes::_invokespecial) ||
@@ -1479,7 +1478,7 @@
   // Get the called method from the invoke bytecode.
   vframeStream vfst(thread, true);
   assert(!vfst.at_end(), "Java frame must exist");
-  methodHandle caller(vfst.method());
+  methodHandle caller(thread, vfst.method());
   Bytecode_invoke invoke(caller, vfst.bci());
   DEBUG_ONLY( invoke.verify(); )
 
@@ -1493,7 +1492,7 @@
   // Install exception and return forward entry.
   address res = StubRoutines::throw_AbstractMethodError_entry();
   JRT_BLOCK
-    methodHandle callee = invoke.static_target(thread);
+    methodHandle callee(thread, invoke.static_target(thread));
     if (!callee.is_null()) {
       oop recv = callerFrame.retrieve_receiver(&reg_map);
       Klass *recv_klass = (recv != NULL) ? recv->klass() : NULL;
@@ -1657,7 +1656,7 @@
     return callee_method;
   }
 
-  methodHandle callee_method = call_info.selected_method();
+  methodHandle callee_method(thread, call_info.selected_method());
 
 #ifndef PRODUCT
   Atomic::inc(&_ic_miss_ctr);
--- a/src/hotspot/share/runtime/sharedRuntime.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/sharedRuntime.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -351,7 +351,7 @@
                                         Bytecodes::Code& bc,
                                         CallInfo& callinfo, TRAPS);
 
-  static methodHandle extract_attached_method(vframeStream& vfst);
+  static Method* extract_attached_method(vframeStream& vfst);
 
   static address clean_virtual_call_entry();
   static address clean_opt_virtual_call_entry();
--- a/src/hotspot/share/runtime/thread.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/thread.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -4367,7 +4367,7 @@
 //   + Call before_exit(), prepare for VM exit
 //      > run VM level shutdown hooks (they are registered through JVM_OnExit(),
 //        currently the only user of this mechanism is File.deleteOnExit())
-//      > stop StatSampler, watcher thread, CMS threads,
+//      > stop StatSampler, watcher thread,
 //        post thread end and vm death events to JVMTI,
 //        stop signal thread
 //   + Call JavaThread::exit(), it will:
--- a/src/hotspot/share/runtime/vframeArray.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/vframeArray.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -589,8 +589,8 @@
     if (index == 0) {
       callee_parameters = callee_locals = 0;
     } else {
-      methodHandle caller = elem->method();
-      methodHandle callee = element(index - 1)->method();
+      methodHandle caller(THREAD, elem->method());
+      methodHandle callee(THREAD, element(index - 1)->method());
       Bytecode_invoke inv(caller, elem->bci());
       // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
       // NOTE:  Use machinery here that avoids resolving of any kind.
--- a/src/hotspot/share/runtime/vmOperations.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/vmOperations.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -63,11 +63,10 @@
   template(GenCollectForAllocation)               \
   template(ParallelGCFailedAllocation)            \
   template(ParallelGCSystemGC)                    \
-  template(CMS_Initial_Mark)                      \
-  template(CMS_Final_Remark)                      \
   template(G1CollectForAllocation)                \
   template(G1CollectFull)                         \
   template(G1Concurrent)                          \
+  template(G1TryInitiateConcMark)                 \
   template(ZMarkStart)                            \
   template(ZMarkEnd)                              \
   template(ZRelocateStart)                        \
--- a/src/hotspot/share/runtime/vmStructs.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/runtime/vmStructs.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1103,7 +1103,7 @@
   CDS_ONLY(nonstatic_field(FileMapInfo,        _header,                   FileMapHeader*))                                           \
   CDS_ONLY(   static_field(FileMapInfo,        _current_info,             FileMapInfo*))                                             \
   CDS_ONLY(nonstatic_field(FileMapHeader,      _space[0],                 CDSFileMapRegion))                                         \
-  CDS_ONLY(nonstatic_field(CDSFileMapRegion,   _addr._base,               char*))                                                    \
+  CDS_ONLY(nonstatic_field(CDSFileMapRegion,   _mapped_base,              char*))                                                    \
   CDS_ONLY(nonstatic_field(CDSFileMapRegion,   _used,                     size_t))                                                   \
                                                                                                                                      \
   /******************/                                                                                                               \
@@ -2674,11 +2674,6 @@
   declare_constant(markWord::no_lock_in_place)                            \
   declare_constant(markWord::max_age)                                     \
                                                                           \
-  /* Constants in markWord used by CMS. */                                \
-  declare_constant(markWord::cms_shift)                                   \
-  declare_constant(markWord::cms_mask)                                    \
-  declare_constant(markWord::size_shift)                                  \
-                                                                          \
   /* InvocationCounter constants */                                       \
   declare_constant(InvocationCounter::count_increment)                    \
   declare_constant(InvocationCounter::count_shift)
--- a/src/hotspot/share/services/attachListener.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/services/attachListener.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -237,19 +237,7 @@
    // This helps reduce the amount of unreachable objects in the dump
     // and makes it easier to browse.
     HeapDumper dumper(live_objects_only /* request GC */);
-    int res = dumper.dump(op->arg(0));
-    if (res == 0) {
-      out->print_cr("Heap dump file created");
-    } else {
-      // heap dump failed
-      ResourceMark rm;
-      char* error = dumper.error_as_C_string();
-      if (error == NULL) {
-        out->print_cr("Dump failed - reason unknown");
-      } else {
-        out->print_cr("%s", error);
-      }
-    }
+    dumper.dump(op->arg(0), out);
   }
   return JNI_OK;
 }
--- a/src/hotspot/share/services/diagnosticCommand.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/services/diagnosticCommand.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -516,19 +516,7 @@
  // This helps reduce the amount of unreachable objects in the dump
   // and makes it easier to browse.
   HeapDumper dumper(!_all.value() /* request GC if _all is false*/);
-  int res = dumper.dump(_filename.value());
-  if (res == 0) {
-    output()->print_cr("Heap dump file created");
-  } else {
-    // heap dump failed
-    ResourceMark rm;
-    char* error = dumper.error_as_C_string();
-    if (error == NULL) {
-      output()->print_cr("Dump failed - reason unknown");
-    } else {
-      output()->print_cr("%s", error);
-    }
-  }
+  dumper.dump(_filename.value(), output());
 }
 
 int HeapDumpDCmd::num_arguments() {
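
Both heap-dump entry points now delegate progress and error reporting to HeapDumper::dump(path, out) (see the heapDumper.cpp hunk below) instead of duplicating the error-formatting logic removed above. Observable behavior through the usual command is unchanged, e.g.:

    jcmd <pid> GC.heap_dump /tmp/heap.hprof

still prints "Dumping heap to ..." followed by the success or failure message, now produced inside dump() itself.
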
--- a/src/hotspot/share/services/heapDumper.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/services/heapDumper.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -1969,12 +1969,12 @@
 }
 
 // dump the heap to given path.
-int HeapDumper::dump(const char* path) {
+int HeapDumper::dump(const char* path, outputStream* out) {
   assert(path != NULL && strlen(path) > 0, "path missing");
 
   // print message in interactive case
-  if (print_to_tty()) {
-    tty->print_cr("Dumping heap to %s ...", path);
+  if (out != NULL) {
+    out->print_cr("Dumping heap to %s ...", path);
     timer()->start();
   }
 
@@ -1982,8 +1982,8 @@
   DumpWriter writer(path);
   if (!writer.is_open()) {
     set_error(writer.error());
-    if (print_to_tty()) {
-      tty->print_cr("Unable to create %s: %s", path,
+    if (out != NULL) {
+      out->print_cr("Unable to create %s: %s", path,
         (error() != NULL) ? error() : "reason unknown");
     }
     return -1;
@@ -2003,13 +2003,13 @@
   set_error(writer.error());
 
   // print message in interactive case
-  if (print_to_tty()) {
+  if (out != NULL) {
     timer()->stop();
     if (error() == NULL) {
-      tty->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
+      out->print_cr("Heap dump file created [" JULONG_FORMAT " bytes in %3.3f secs]",
                     writer.bytes_written(), timer()->seconds());
     } else {
-      tty->print_cr("Dump file is incomplete: %s", writer.error());
+      out->print_cr("Dump file is incomplete: %s", writer.error());
     }
   }
 
@@ -2137,8 +2137,7 @@
   dump_file_seq++;   // increment seq number for next time we dump
 
   HeapDumper dumper(false /* no GC before heap dump */,
-                    true  /* send to tty */,
                     oome  /* pass along out-of-memory-error flag */);
-  dumper.dump(my_path);
+  dumper.dump(my_path, tty);
   os::free(my_path);
 }
--- a/src/hotspot/share/services/heapDumper.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/services/heapDumper.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -41,24 +41,22 @@
 //  }
 //
 
+class outputStream;
+
 class HeapDumper : public StackObj {
  private:
   char* _error;
-  bool _print_to_tty;
   bool _gc_before_heap_dump;
   bool _oome;
   elapsedTimer _t;
 
-  HeapDumper(bool gc_before_heap_dump, bool print_to_tty, bool oome) :
-    _error(NULL), _print_to_tty(print_to_tty), _gc_before_heap_dump(gc_before_heap_dump), _oome(oome) { }
+  HeapDumper(bool gc_before_heap_dump, bool oome) :
+    _error(NULL), _gc_before_heap_dump(gc_before_heap_dump), _oome(oome) { }
 
   // string representation of error
   char* error() const                   { return _error; }
   void set_error(char* error);
 
-  // indicates if progress messages can be sent to tty
-  bool print_to_tty() const             { return _print_to_tty; }
-
   // internal timer.
   elapsedTimer* timer()                 { return &_t; }
 
@@ -66,12 +64,13 @@
 
  public:
   HeapDumper(bool gc_before_heap_dump) :
-    _error(NULL), _print_to_tty(false), _gc_before_heap_dump(gc_before_heap_dump), _oome(false) { }
+    _error(NULL), _gc_before_heap_dump(gc_before_heap_dump), _oome(false) { }
 
   ~HeapDumper();
 
   // dumps the heap to the specified file, returns 0 if success.
-  int dump(const char* path);
+  // additional info is written to out if not NULL.
+  int dump(const char* path, outputStream* out = NULL);
 
   // returns error message (resource allocated), or NULL if no error
   char* error_as_C_string() const;
--- a/src/hotspot/share/utilities/debug.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/utilities/debug.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -642,6 +642,7 @@
   tty->print_cr("  pns(void* sp, void* fp, void* pc)  - print native (i.e. mixed) stack trace. E.g.");
   tty->print_cr("                   pns($sp, $rbp, $pc) on Linux/amd64 and Solaris/amd64 or");
   tty->print_cr("                   pns($sp, $ebp, $pc) on Linux/x86 or");
+  tty->print_cr("                   pns($sp, $fp, $pc)  on Linux/AArch64 or");
   tty->print_cr("                   pns($sp, 0, $pc)    on Linux/ppc64 or");
   tty->print_cr("                   pns($sp, $s8, $pc)  on Linux/mips or");
   tty->print_cr("                   pns($sp + 0x7ff, 0, $pc) on Solaris/SPARC");
--- a/src/hotspot/share/utilities/dtrace_disabled.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/utilities/dtrace_disabled.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -121,14 +121,6 @@
 #define HOTSPOT_VMOPS_END_ENABLED() 0
 
 /* hs_private provider probes */
-#define HS_PRIVATE_CMS_INITMARK_BEGIN()
-#define HS_PRIVATE_CMS_INITMARK_BEGIN_ENABLED() 0
-#define HS_PRIVATE_CMS_INITMARK_END()
-#define HS_PRIVATE_CMS_INITMARK_END_ENABLED() 0
-#define HS_PRIVATE_CMS_REMARK_BEGIN()
-#define HS_PRIVATE_CMS_REMARK_BEGIN_ENABLED() 0
-#define HS_PRIVATE_CMS_REMARK_END()
-#define HS_PRIVATE_CMS_REMARK_END_ENABLED() 0
 #define HS_PRIVATE_HASHTABLE_NEW_ENTRY(arg0, arg1, arg2, arg3)
 #define HS_PRIVATE_HASHTABLE_NEW_ENTRY_ENABLED() 0
 #define HS_PRIVATE_SAFEPOINT_BEGIN()
--- a/src/hotspot/share/utilities/exceptions.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/utilities/exceptions.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -229,7 +229,7 @@
     exception = Handle(THREAD, e);  // fill_in_stack trace does gc
     assert(k->is_initialized(), "need to increase java_thread_min_stack_allowed calculation");
     if (StackTraceInThrowable) {
-      java_lang_Throwable::fill_in_stack_trace(exception, method());
+      java_lang_Throwable::fill_in_stack_trace(exception, method);
     }
     // Increment counter for hs_err file reporting
     Atomic::inc(&Exceptions::_stack_overflow_errors);
--- a/src/hotspot/share/utilities/macros.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/utilities/macros.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -131,24 +131,6 @@
 #define NOT_MANAGEMENT_RETURN_(code) { return code; }
 #endif // INCLUDE_MANAGEMENT
 
-#ifndef INCLUDE_CMSGC
-#define INCLUDE_CMSGC 1
-#endif // INCLUDE_CMSGC
-
-#if INCLUDE_CMSGC
-#define CMSGC_ONLY(x) x
-#define CMSGC_ONLY_ARG(arg) arg,
-#define NOT_CMSGC(x)
-#define NOT_CMSGC_RETURN        /* next token must be ; */
-#define NOT_CMSGC_RETURN_(code) /* next token must be ; */
-#else
-#define CMSGC_ONLY(x)
-#define CMSGC_ONLY_ARG(x)
-#define NOT_CMSGC(x) x
-#define NOT_CMSGC_RETURN        {}
-#define NOT_CMSGC_RETURN_(code) { return code; }
-#endif // INCLUDE_CMSGC
-
 #ifndef INCLUDE_EPSILONGC
 #define INCLUDE_EPSILONGC 1
 #endif // INCLUDE_EPSILONGC
--- a/src/hotspot/share/utilities/xmlstream.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/utilities/xmlstream.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -385,9 +385,9 @@
 // ------------------------------------------------------------------
 // Output a method attribute, in the form " method='pkg/cls name sig'".
 // This is used only when there is no ciMethod available.
-void xmlStream::method(const methodHandle& method) {
+void xmlStream::method(Method* method) {
   assert_if_no_error(inside_attrs(), "printing attributes");
-  if (method.is_null())  return;
+  if (method == NULL)  return;
   print_raw(" method='");
   method_text(method);
   print("' bytes='%d'", method->code_size());
@@ -413,10 +413,10 @@
   }
 }
 
-void xmlStream::method_text(const methodHandle& method) {
+void xmlStream::method_text(Method* method) {
   ResourceMark rm;
   assert_if_no_error(inside_attrs(), "printing attributes");
-  if (method.is_null())  return;
+  if (method == NULL)  return;
   text()->print("%s", method->method_holder()->external_name());
   print_raw(" ");  // " " is easier for tools to parse than "::"
   method->name()->print_symbol_on(text());
--- a/src/hotspot/share/utilities/xmlstream.hpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/hotspot/share/utilities/xmlstream.hpp	Thu Nov 14 13:50:03 2019 +0000
@@ -137,14 +137,14 @@
 
   // commonly used XML attributes
   void          stamp();                 // stamp='1.234'
-  void          method(const methodHandle& m);  // method='k n s' ...
+  void          method(Method* m);       // method='k n s' ...
   void          klass(Klass* k);         // klass='name'
   void          name(const Symbol* s);   // name='name'
   void          object(const char* attr, Metadata* val);
   void          object(const char* attr, Handle val);
 
   // print the text alone (sans ''):
-  void          method_text(const methodHandle& m);
+  void          method_text(Method* m);
   void          klass_text(Klass* k);         // klass='name'
   void          name_text(const Symbol* s);   // name='name'
   void          object_text(Metadata* x);
--- a/src/java.base/macosx/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/macosx/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -429,8 +429,9 @@
 
             @Override
             public NumberFormat getIntegerInstance(Locale locale) {
-                return new DecimalFormat(getNumberPattern(NF_INTEGER, locale),
+                DecimalFormat format = new DecimalFormat(getNumberPattern(NF_INTEGER, locale),
                     DecimalFormatSymbols.getInstance(locale));
+                return HostLocaleProviderAdapter.makeIntegerFormatter(format);
             }
 
             @Override
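
For context: getIntegerInstance() is specified to return a format that rounds to the nearest integer (half-even) and parses only the integer portion of its input, which is what the wrapper returned by makeIntegerFormatter is expected to enforce on the host-provided DecimalFormat. A small sketch against the public java.text API (specified behavior, not code from this patch):

    NumberFormat nf = NumberFormat.getIntegerInstance(Locale.US);
    System.out.println(nf.format(3.7));  // "4"  (rounds to an integer)
    System.out.println(nf.parse("3.7")); // 3    (integer part only; parse() throws ParseException)
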
--- a/src/java.base/share/classes/com/sun/crypto/provider/JceKeyStore.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/com/sun/crypto/provider/JceKeyStore.java	Thu Nov 14 13:50:03 2019 +0000
@@ -45,6 +45,8 @@
 import java.security.cert.CertificateException;
 import javax.crypto.SealedObject;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * This class provides the keystore implementation referred to as "jceks".
  * This implementation strongly protects the keystore private keys using
@@ -909,7 +911,8 @@
      * hash with a bit of whitener.
      */
     private MessageDigest getPreKeyedHash(char[] password)
-    throws NoSuchAlgorithmException, UnsupportedEncodingException {
+        throws NoSuchAlgorithmException
+    {
         int i, j;
 
         MessageDigest md = MessageDigest.getInstance("SHA");
@@ -921,7 +924,7 @@
         md.update(passwdBytes);
         for (i=0; i<passwdBytes.length; i++)
             passwdBytes[i] = 0;
-        md.update("Mighty Aphrodite".getBytes("UTF8"));
+        md.update("Mighty Aphrodite".getBytes(UTF_8));
         return md;
     }
 
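String.getBytes(String) declares UnsupportedEncodingException, which is why the old signature had to throw it; the Charset overload does not, so the throws clause can be dropped. Standard-library behavior, not code from this patch:

    import static java.nio.charset.StandardCharsets.UTF_8;

    byte[] ok = "Mighty Aphrodite".getBytes(UTF_8); // no checked exception to handle
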
--- a/src/java.base/share/classes/com/sun/crypto/provider/PBEWithMD5AndDESCipher.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/com/sun/crypto/provider/PBEWithMD5AndDESCipher.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 
 package com.sun.crypto.provider;
 
-import java.io.UnsupportedEncodingException;
 import java.security.*;
 import java.security.spec.*;
 import javax.crypto.*;
--- a/src/java.base/share/classes/com/sun/crypto/provider/PBEWithMD5AndTripleDESCipher.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/com/sun/crypto/provider/PBEWithMD5AndTripleDESCipher.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 
 package com.sun.crypto.provider;
 
-import java.io.UnsupportedEncodingException;
 import java.security.*;
 import java.security.spec.*;
 import javax.crypto.*;
--- a/src/java.base/share/classes/com/sun/crypto/provider/PBKDF2KeyImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/com/sun/crypto/provider/PBKDF2KeyImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -29,7 +29,6 @@
 import java.lang.ref.Reference;
 import java.nio.ByteBuffer;
 import java.nio.CharBuffer;
-import java.nio.charset.Charset;
 import java.util.Arrays;
 import java.util.Locale;
 import java.security.MessageDigest;
@@ -41,6 +40,8 @@
 import javax.crypto.SecretKey;
 import javax.crypto.spec.PBEKeySpec;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import jdk.internal.ref.CleanerFactory;
 
 /**
@@ -66,9 +67,8 @@
     private Mac prf;
 
     private static byte[] getPasswordBytes(char[] passwd) {
-        Charset utf8 = Charset.forName("UTF-8");
         CharBuffer cb = CharBuffer.wrap(passwd);
-        ByteBuffer bb = utf8.encode(cb);
+        ByteBuffer bb = UTF_8.encode(cb);
 
         int len = bb.limit();
         byte[] passwdBytes = new byte[len];
--- a/src/java.base/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/com/sun/crypto/provider/TlsPrfGenerator.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@
 import java.security.*;
 import java.security.spec.AlgorithmParameterSpec;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import javax.crypto.*;
 import javax.crypto.spec.SecretKeySpec;
 
@@ -153,7 +155,7 @@
         SecretKey key = spec.getSecret();
         byte[] secret = (key == null) ? null : key.getEncoded();
         try {
-            byte[] labelBytes = spec.getLabel().getBytes("UTF8");
+            byte[] labelBytes = spec.getLabel().getBytes(UTF_8);
             int n = spec.getOutputLength();
             byte[] prfBytes = (tls12 ?
                 doTLS12PRF(secret, labelBytes, spec.getSeed(), n,
@@ -163,8 +165,6 @@
             return new SecretKeySpec(prfBytes, "TlsPrf");
         } catch (GeneralSecurityException e) {
             throw new ProviderException("Could not generate PRF", e);
-        } catch (java.io.UnsupportedEncodingException e) {
-            throw new ProviderException("Could not generate PRF", e);
         }
     }
 
--- a/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/java/lang/invoke/InvokerBytecodeGenerator.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1168,7 +1168,7 @@
       *  } catch (Throwable e) {
       *      if (!a2.isInstance(e)) throw e;
       *      return a3.invokeBasic(ex, a6, a7);
-      *  }}
+      *  }}</pre></blockquote>
       */
     private Name emitGuardWithCatch(int pos) {
         Name args    = lambdaForm.names[pos];
@@ -1263,26 +1263,27 @@
      *                      load target                             (-- target)
      *                      load args                               (-- args... target)
      *                      INVOKEVIRTUAL MethodHandle.invokeBasic  (depends)
-     * FINALLY_NORMAL:      (-- r)
-     *                      load cleanup                            (-- cleanup r)
-     *                      SWAP                                    (-- r cleanup)
-     *                      ACONST_NULL                             (-- t r cleanup)
-     *                      SWAP                                    (-- r t cleanup)
-     *                      load args                               (-- args... r t cleanup)
-     *                      INVOKEVIRTUAL MethodHandle.invokeBasic  (-- r)
+     * FINALLY_NORMAL:      (-- r_2nd* r)
+     *                      store returned value                    (--)
+     *                      load cleanup                            (-- cleanup)
+     *                      ACONST_NULL                             (-- t cleanup)
+     *                      load returned value                     (-- r_2nd* r t cleanup)
+     *                      load args                               (-- args... r_2nd* r t cleanup)
+     *                      INVOKEVIRTUAL MethodHandle.invokeBasic  (-- r_2nd* r)
      *                      GOTO DONE
      * CATCH:               (-- t)
      *                      DUP                                     (-- t t)
      * FINALLY_EXCEPTIONAL: (-- t t)
      *                      load cleanup                            (-- cleanup t t)
      *                      SWAP                                    (-- t cleanup t)
-     *                      load default for r                      (-- r t cleanup t)
-     *                      load args                               (-- args... r t cleanup t)
-     *                      INVOKEVIRTUAL MethodHandle.invokeBasic  (-- r t)
-     *                      POP                                     (-- t)
+     *                      load default for r                      (-- r_2nd* r t cleanup t)
+     *                      load args                               (-- args... r_2nd* r t cleanup t)
+     *                      INVOKEVIRTUAL MethodHandle.invokeBasic  (-- r_2nd* r t)
+     *                      POP/POP2*                               (-- t)
      *                      ATHROW
      * DONE:                (-- r)
      * }</pre></blockquote>
+     * * = depends on whether the return type takes up 2 stack slots.
      */
     private Name emitTryFinally(int pos) {
         Name args    = lambdaForm.names[pos];
@@ -1295,7 +1296,9 @@
         Label lDone = new Label();
 
         Class<?> returnType = result.function.resolvedHandle().type().returnType();
+        BasicType basicReturnType = BasicType.basicType(returnType);
         boolean isNonVoid = returnType != void.class;
+
         MethodType type = args.function.resolvedHandle().type()
                 .dropParameterTypes(0,1)
                 .changeReturnType(returnType);
@@ -1316,13 +1319,14 @@
         mv.visitLabel(lTo);
 
         // FINALLY_NORMAL:
-        emitPushArgument(invoker, 1); // load cleanup
+        int index = extendLocalsMap(new Class<?>[]{ returnType });
         if (isNonVoid) {
-            mv.visitInsn(Opcodes.SWAP);
+            emitStoreInsn(basicReturnType, index);
         }
+        emitPushArgument(invoker, 1); // load cleanup
         mv.visitInsn(Opcodes.ACONST_NULL);
         if (isNonVoid) {
-            mv.visitInsn(Opcodes.SWAP);
+            emitLoadInsn(basicReturnType, index);
         }
         emitPushArguments(args, 1); // load args (skip 0: method handle)
         mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, MH, "invokeBasic", cleanupDesc, false);
@@ -1341,7 +1345,7 @@
         emitPushArguments(args, 1); // load args (skip 0: method handle)
         mv.visitMethodInsn(Opcodes.INVOKEVIRTUAL, MH, "invokeBasic", cleanupDesc, false);
         if (isNonVoid) {
-            mv.visitInsn(Opcodes.POP);
+            emitPopInsn(basicReturnType);
         }
         mv.visitInsn(Opcodes.ATHROW);
 
@@ -1351,6 +1355,24 @@
         return result;
     }
 
+    private void emitPopInsn(BasicType type) {
+        mv.visitInsn(popInsnOpcode(type));
+    }
+
+    private static int popInsnOpcode(BasicType type) {
+        switch (type) {
+            case I_TYPE:
+            case F_TYPE:
+            case L_TYPE:
+                return Opcodes.POP;
+            case J_TYPE:
+            case D_TYPE:
+                return Opcodes.POP2;
+            default:
+                throw new InternalError("unknown type: " + type);
+        }
+    }
+
     /**
      * Emit bytecode for the loop idiom.
      * <p>
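
The store/load and POP/POP2 changes above matter exactly when the combinator's return type is long or double, which occupy two JVM operand-stack slots (the r_2nd entries in the revised stack comments). A minimal sketch of the affected public API, MethodHandles.tryFinally, with a two-slot return type (illustrative only):

    MethodHandle target  = MethodHandles.constant(long.class, 42L);
    // cleanup must accept (Throwable, long) and return long
    MethodHandle cleanup = MethodHandles.dropArguments(
            MethodHandles.identity(long.class), 0, Throwable.class);
    long r = (long) MethodHandles.tryFinally(target, cleanup).invoke(); // 42L
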
--- a/src/java.base/share/classes/java/security/AlgorithmParameters.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/java/security/AlgorithmParameters.java	Thu Nov 14 13:50:03 2019 +0000
@@ -51,7 +51,6 @@
  * following standard {@code AlgorithmParameters} algorithms:
  * <ul>
  * <li>{@code AES}</li>
- * <li>{@code DES}</li>
  * <li>{@code DESede}</li>
  * <li>{@code DiffieHellman}</li>
  * <li>{@code DSA}</li>
--- a/src/java.base/share/classes/java/security/MessageDigest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/java/security/MessageDigest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -84,7 +84,6 @@
  * <p> Every implementation of the Java platform is required to support
  * the following standard {@code MessageDigest} algorithms:
  * <ul>
- * <li>{@code MD5}</li>
  * <li>{@code SHA-1}</li>
  * <li>{@code SHA-256}</li>
  * </ul>
--- a/src/java.base/share/classes/java/util/Arrays.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/java/util/Arrays.java	Thu Nov 14 13:50:03 2019 +0000
@@ -74,17 +74,658 @@
  */
 public class Arrays {
 
-    /**
-     * The minimum array length below which a parallel sorting
-     * algorithm will not further partition the sorting task. Using
-     * smaller sizes typically results in memory contention across
-     * tasks that makes parallel speedups unlikely.
-     */
-    private static final int MIN_ARRAY_SORT_GRAN = 1 << 13;
-
     // Suppresses default constructor, ensuring non-instantiability.
     private Arrays() {}
 
+    /*
+     * Sorting methods. Note that all public "sort" methods take the
+     * same form: performing argument checks if necessary, and then
+     * expanding arguments into those required for the internal
+     * implementation methods residing in other package-private
+     * classes (except for legacyMergeSort, included in this class).
+     */
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     */
+    public static void sort(int[] a) {
+        DualPivotQuicksort.sort(a, 0, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending order. The range
+     * to be sorted extends from the index {@code fromIndex}, inclusive, to
+     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
+     * the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     */
+    public static void sort(int[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, 0, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     */
+    public static void sort(long[] a) {
+        DualPivotQuicksort.sort(a, 0, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending order. The range
+     * to be sorted extends from the index {@code fromIndex}, inclusive, to
+     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
+     * the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     */
+    public static void sort(long[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, 0, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     */
+    public static void sort(short[] a) {
+        DualPivotQuicksort.sort(a, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending order. The range
+     * to be sorted extends from the index {@code fromIndex}, inclusive, to
+     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
+     * the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     */
+    public static void sort(short[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     */
+    public static void sort(char[] a) {
+        DualPivotQuicksort.sort(a, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending order. The range
+     * to be sorted extends from the index {@code fromIndex}, inclusive, to
+     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
+     * the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     */
+    public static void sort(char[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     */
+    public static void sort(byte[] a) {
+        DualPivotQuicksort.sort(a, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending order. The range
+     * to be sorted extends from the index {@code fromIndex}, inclusive, to
+     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
+     * the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     */
+    public static void sort(byte[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * <p>The {@code <} relation does not provide a total order on all float
+     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
+     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
+     * other value and all {@code Float.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     */
+    public static void sort(float[] a) {
+        DualPivotQuicksort.sort(a, 0, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending order. The range
+     * to be sorted extends from the index {@code fromIndex}, inclusive, to
+     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
+     * the range to be sorted is empty.
+     *
+     * <p>The {@code <} relation does not provide a total order on all float
+     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
+     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
+     * other value and all {@code Float.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     */
+    public static void sort(float[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, 0, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * <p>The {@code <} relation does not provide a total order on all double
+     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
+     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
+     * other value and all {@code Double.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     */
+    public static void sort(double[] a) {
+        DualPivotQuicksort.sort(a, 0, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending order. The range
+     * to be sorted extends from the index {@code fromIndex}, inclusive, to
+     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
+     * the range to be sorted is empty.
+     *
+     * <p>The {@code <} relation does not provide a total order on all double
+     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
+     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
+     * other value and all {@code Double.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort
+     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     */
+    public static void sort(double[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, 0, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(byte[] a) {
+        DualPivotQuicksort.sort(a, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending numerical order.
+     * The range to be sorted extends from the index {@code fromIndex},
+     * inclusive, to the index {@code toIndex}, exclusive. If
+     * {@code fromIndex == toIndex}, the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(byte[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(char[] a) {
+        DualPivotQuicksort.sort(a, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending numerical order.
+     * The range to be sorted extends from the index {@code fromIndex},
+     * inclusive, to the index {@code toIndex}, exclusive. If
+     * {@code fromIndex == toIndex}, the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(char[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(short[] a) {
+        DualPivotQuicksort.sort(a, 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending numerical order.
+     * The range to be sorted extends from the index {@code fromIndex},
+     * inclusive, to the index {@code toIndex}, exclusive. If
+     * {@code fromIndex == toIndex}, the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(short[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(int[] a) {
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending numerical order.
+     * The range to be sorted extends from the index {@code fromIndex},
+     * inclusive, to the index {@code toIndex}, exclusive. If
+     * {@code fromIndex == toIndex}, the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(int[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), fromIndex, toIndex);
+    }
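
Unlike the byte/char/short overloads above, which now delegate without a parallelism argument, the int (and wider) entry points thread the common pool's parallelism straight into DualPivotQuicksort. A small sketch, using only public JDK API, of where that number comes from and how it can be capped:

    import java.util.Arrays;
    import java.util.Random;
    import java.util.concurrent.ForkJoinPool;

    class CommonPoolSortDemo {
        public static void main(String[] args) {
            // The parallelism that parallelSort hands to DualPivotQuicksort;
            // it can be capped process-wide with the standard property
            // -Djava.util.concurrent.ForkJoinPool.common.parallelism=<n>.
            System.out.println(ForkJoinPool.getCommonPoolParallelism());

            int[] a = new Random(42).ints(1 << 20).toArray();
            Arrays.parallelSort(a);
        }
    }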
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(long[] a) {
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending numerical order.
+     * The range to be sorted extends from the index {@code fromIndex},
+     * inclusive, to the index {@code toIndex}, exclusive. If
+     * {@code fromIndex == toIndex}, the range to be sorted is empty.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(long[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * <p>The {@code <} relation does not provide a total order on all float
+     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
+     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
+     * other value and all {@code Float.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(float[] a) {
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), 0, a.length);
+    }
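
The total-order paragraph is the subtle part of this spec. A short sketch contrasting the numeric operators with the order the sort actually uses (Float.compare imposes the same total order as Float.compareTo):

    class FloatTotalOrderDemo {
        public static void main(String[] args) {
            // Numeric operators: -0.0f equals 0.0f, and NaN is unordered.
            System.out.println(-0.0f == 0.0f);          // true
            System.out.println(Float.NaN == Float.NaN); // false

            // The order the sort uses: -0.0f sorts below 0.0f,
            // and NaN sorts above everything, including infinity.
            System.out.println(Float.compare(-0.0f, 0.0f) < 0);                    // true
            System.out.println(Float.compare(Float.NaN, Float.POSITIVE_INFINITY)); // 1
        }
    }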
+
+    /**
+     * Sorts the specified range of the array into ascending numerical order.
+     * The range to be sorted extends from the index {@code fromIndex},
+     * inclusive, to the index {@code toIndex}, exclusive. If
+     * {@code fromIndex == toIndex}, the range to be sorted is empty.
+     *
+     * <p>The {@code <} relation does not provide a total order on all float
+     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
+     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
+     * other value and all {@code Float.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(float[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), fromIndex, toIndex);
+    }
+
+    /**
+     * Sorts the specified array into ascending numerical order.
+     *
+     * <p>The {@code <} relation does not provide a total order on all double
+     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
+     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
+     * other value and all {@code Double.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(double[] a) {
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), 0, a.length);
+    }
+
+    /**
+     * Sorts the specified range of the array into ascending numerical order.
+     * The range to be sorted extends from the index {@code fromIndex},
+     * inclusive, to the index {@code toIndex}, exclusive. If
+     * {@code fromIndex == toIndex}, the range to be sorted is empty.
+     *
+     * <p>The {@code <} relation does not provide a total order on all double
+     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
+     * value compares neither less than, greater than, nor equal to any value,
+     * even itself. This method uses the total order imposed by the method
+     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
+     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
+     * other value and all {@code Double.NaN} values are considered equal.
+     *
+     * @implNote The sorting algorithm is a Dual-Pivot Quicksort by
+     * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+     * offers O(n log(n)) performance on all data sets, and is typically
+     * faster than traditional (one-pivot) Quicksort implementations.
+     *
+     * @param a the array to be sorted
+     * @param fromIndex the index of the first element, inclusive, to be sorted
+     * @param toIndex the index of the last element, exclusive, to be sorted
+     *
+     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
+     * @throws ArrayIndexOutOfBoundsException
+     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
+     *
+     * @since 1.8
+     */
+    public static void parallelSort(double[] a, int fromIndex, int toIndex) {
+        rangeCheck(a.length, fromIndex, toIndex);
+        DualPivotQuicksort.sort(a, ForkJoinPool.getCommonPoolParallelism(), fromIndex, toIndex);
+    }
+
+    /**
+     * Checks that {@code fromIndex} and {@code toIndex} are in
+     * the range and throws an exception if they aren't.
+     */
+    static void rangeCheck(int arrayLength, int fromIndex, int toIndex) {
+        if (fromIndex > toIndex) {
+            throw new IllegalArgumentException(
+                "fromIndex(" + fromIndex + ") > toIndex(" + toIndex + ")");
+        }
+        if (fromIndex < 0) {
+            throw new ArrayIndexOutOfBoundsException(fromIndex);
+        }
+        if (toIndex > arrayLength) {
+            throw new ArrayIndexOutOfBoundsException(toIndex);
+        }
+    }
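
One detail of rangeCheck worth spelling out: the ordering test runs before the bounds tests, so it decides which exception is thrown when several conditions fail at once. A usage sketch:

    import java.util.Arrays;

    class RangeCheckDemo {
        public static void main(String[] args) {
            int[] a = new int[8];
            try {
                Arrays.parallelSort(a, -1, -2);   // both conditions are violated...
            } catch (IllegalArgumentException e) {
                // ...but the ordering test runs first: fromIndex(-1) > toIndex(-2)
                System.out.println(e.getMessage());
            }
            try {
                Arrays.parallelSort(a, 0, 9);     // toIndex > a.length
            } catch (ArrayIndexOutOfBoundsException e) {
                System.out.println(e.getMessage()); // Array index out of range: 9
            }
        }
    }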
+
     /**
      * A comparator that implements the natural ordering of a group of
      * mutually comparable elements. May be used when a supplied
@@ -109,863 +750,12 @@
     }
 
     /**
-     * Checks that {@code fromIndex} and {@code toIndex} are in
-     * the range and throws an exception if they aren't.
-     */
-    static void rangeCheck(int arrayLength, int fromIndex, int toIndex) {
-        if (fromIndex > toIndex) {
-            throw new IllegalArgumentException(
-                    "fromIndex(" + fromIndex + ") > toIndex(" + toIndex + ")");
-        }
-        if (fromIndex < 0) {
-            throw new ArrayIndexOutOfBoundsException(fromIndex);
-        }
-        if (toIndex > arrayLength) {
-            throw new ArrayIndexOutOfBoundsException(toIndex);
-        }
-    }
-
-    /*
-     * Sorting methods. Note that all public "sort" methods take the
-     * same form: Performing argument checks if necessary, and then
-     * expanding arguments into those required for the internal
-     * implementation methods residing in other package-private
-     * classes (except for legacyMergeSort, included in this class).
-     */
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     */
-    public static void sort(int[] a) {
-        DualPivotQuicksort.sort(a, 0, a.length - 1, null, 0, 0);
-    }
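
Worth pausing on this hunk: the internal DualPivotQuicksort entry point changed shape between the two sides of the diff. Both call forms appear verbatim in the patch; the parameter names in this gloss are assumptions, not taken from the source:

    // old: DualPivotQuicksort.sort(a, left, right, work, workBase, workLen)
    //      'right' is the last index, inclusive, which is why these calls
    //      pass "a.length - 1" and "toIndex - 1"; the trailing (null, 0, 0)
    //      stands in for an optional pre-allocated work buffer.
    // new: DualPivotQuicksort.sort(a, parallelism, low, high)
    //      'high' is exclusive, and the sequential sort paths pass 0 as
    //      the parallelism argument (see the new sort methods above).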
-
-    /**
-     * Sorts the specified range of the array into ascending order. The range
-     * to be sorted extends from the index {@code fromIndex}, inclusive, to
-     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
-     * the range to be sorted is empty.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     */
-    public static void sort(int[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     */
-    public static void sort(long[] a) {
-        DualPivotQuicksort.sort(a, 0, a.length - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending order. The range
-     * to be sorted extends from the index {@code fromIndex}, inclusive, to
-     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
-     * the range to be sorted is empty.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     */
-    public static void sort(long[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     */
-    public static void sort(short[] a) {
-        DualPivotQuicksort.sort(a, 0, a.length - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending order. The range
-     * to be sorted extends from the index {@code fromIndex}, inclusive, to
-     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
-     * the range to be sorted is empty.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     */
-    public static void sort(short[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     */
-    public static void sort(char[] a) {
-        DualPivotQuicksort.sort(a, 0, a.length - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending order. The range
-     * to be sorted extends from the index {@code fromIndex}, inclusive, to
-     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
-     * the range to be sorted is empty.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     */
-    public static void sort(char[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     */
-    public static void sort(byte[] a) {
-        DualPivotQuicksort.sort(a, 0, a.length - 1);
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending order. The range
-     * to be sorted extends from the index {@code fromIndex}, inclusive, to
-     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
-     * the range to be sorted is empty.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     */
-    public static void sort(byte[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        DualPivotQuicksort.sort(a, fromIndex, toIndex - 1);
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>The {@code <} relation does not provide a total order on all float
-     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
-     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
-     * other value and all {@code Float.NaN} values are considered equal.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     */
-    public static void sort(float[] a) {
-        DualPivotQuicksort.sort(a, 0, a.length - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending order. The range
-     * to be sorted extends from the index {@code fromIndex}, inclusive, to
-     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
-     * the range to be sorted is empty.
-     *
-     * <p>The {@code <} relation does not provide a total order on all float
-     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
-     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
-     * other value and all {@code Float.NaN} values are considered equal.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     */
-    public static void sort(float[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>The {@code <} relation does not provide a total order on all double
-     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
-     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
-     * other value and all {@code Double.NaN} values are considered equal.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     */
-    public static void sort(double[] a) {
-        DualPivotQuicksort.sort(a, 0, a.length - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending order. The range
-     * to be sorted extends from the index {@code fromIndex}, inclusive, to
-     * the index {@code toIndex}, exclusive. If {@code fromIndex == toIndex},
-     * the range to be sorted is empty.
-     *
-     * <p>The {@code <} relation does not provide a total order on all double
-     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
-     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
-     * other value and all {@code Double.NaN} values are considered equal.
-     *
-     * <p>Implementation note: The sorting algorithm is a Dual-Pivot Quicksort
-     * by Vladimir Yaroslavskiy, Jon Bentley, and Joshua Bloch. This algorithm
-     * offers O(n log(n)) performance on many data sets that cause other
-     * quicksorts to degrade to quadratic performance, and is typically
-     * faster than traditional (one-pivot) Quicksort implementations.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     */
-    public static void sort(double[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(byte[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(byte[]) Arrays.sort} method. The algorithm requires a
-     * working space no greater than the size of the original array. The
-     * {@link ForkJoinPool#commonPool() ForkJoin common pool} is used to
-     * execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(byte[] a) {
-        int n = a.length, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, 0, n - 1);
-        else
-            new ArraysParallelSortHelpers.FJByte.Sorter
-                (null, a, new byte[n], 0, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending numerical order.
-     * The range to be sorted extends from the index {@code fromIndex},
-     * inclusive, to the index {@code toIndex}, exclusive. If
-     * {@code fromIndex == toIndex}, the range to be sorted is empty.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(byte[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(byte[]) Arrays.sort} method. The algorithm requires a working
-     * space no greater than the size of the specified range of the original
-     * array. The {@link ForkJoinPool#commonPool() ForkJoin common pool} is
-     * used to execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(byte[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        int n = toIndex - fromIndex, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, fromIndex, toIndex - 1);
-        else
-            new ArraysParallelSortHelpers.FJByte.Sorter
-                (null, a, new byte[n], fromIndex, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(char[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(char[]) Arrays.sort} method. The algorithm requires a
-     * working space no greater than the size of the original array. The
-     * {@link ForkJoinPool#commonPool() ForkJoin common pool} is used to
-     * execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(char[] a) {
-        int n = a.length, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, 0, n - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJChar.Sorter
-                (null, a, new char[n], 0, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending numerical order.
-     * The range to be sorted extends from the index {@code fromIndex},
-     * inclusive, to the index {@code toIndex}, exclusive. If
-     * {@code fromIndex == toIndex}, the range to be sorted is empty.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(char[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(char[]) Arrays.sort} method. The algorithm requires a working
-     * space no greater than the size of the specified range of the original
-     * array. The {@link ForkJoinPool#commonPool() ForkJoin common pool} is
-     * used to execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(char[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        int n = toIndex - fromIndex, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJChar.Sorter
-                (null, a, new char[n], fromIndex, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(short[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(short[]) Arrays.sort} method. The algorithm requires a
-     * working space no greater than the size of the original array. The
-     * {@link ForkJoinPool#commonPool() ForkJoin common pool} is used to
-     * execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(short[] a) {
-        int n = a.length, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, 0, n - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJShort.Sorter
-                (null, a, new short[n], 0, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending numerical order.
-     * The range to be sorted extends from the index {@code fromIndex},
-     * inclusive, to the index {@code toIndex}, exclusive. If
-     * {@code fromIndex == toIndex}, the range to be sorted is empty.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(short[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(short[]) Arrays.sort} method. The algorithm requires a working
-     * space no greater than the size of the specified range of the original
-     * array. The {@link ForkJoinPool#commonPool() ForkJoin common pool} is
-     * used to execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(short[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        int n = toIndex - fromIndex, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJShort.Sorter
-                (null, a, new short[n], fromIndex, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(int[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(int[]) Arrays.sort} method. The algorithm requires a
-     * working space no greater than the size of the original array. The
-     * {@link ForkJoinPool#commonPool() ForkJoin common pool} is used to
-     * execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(int[] a) {
-        int n = a.length, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, 0, n - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJInt.Sorter
-                (null, a, new int[n], 0, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending numerical order.
-     * The range to be sorted extends from the index {@code fromIndex},
-     * inclusive, to the index {@code toIndex}, exclusive. If
-     * {@code fromIndex == toIndex}, the range to be sorted is empty.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(int[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(int[]) Arrays.sort} method. The algorithm requires a working
-     * space no greater than the size of the specified range of the original
-     * array. The {@link ForkJoinPool#commonPool() ForkJoin common pool} is
-     * used to execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(int[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        int n = toIndex - fromIndex, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJInt.Sorter
-                (null, a, new int[n], fromIndex, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(long[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(long[]) Arrays.sort} method. The algorithm requires a
-     * working space no greater than the size of the original array. The
-     * {@link ForkJoinPool#commonPool() ForkJoin common pool} is used to
-     * execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(long[] a) {
-        int n = a.length, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, 0, n - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJLong.Sorter
-                (null, a, new long[n], 0, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending numerical order.
-     * The range to be sorted extends from the index {@code fromIndex},
-     * inclusive, to the index {@code toIndex}, exclusive. If
-     * {@code fromIndex == toIndex}, the range to be sorted is empty.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(long[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(long[]) Arrays.sort} method. The algorithm requires a working
-     * space no greater than the size of the specified range of the original
-     * array. The {@link ForkJoinPool#commonPool() ForkJoin common pool} is
-     * used to execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(long[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        int n = toIndex - fromIndex, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJLong.Sorter
-                (null, a, new long[n], fromIndex, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>The {@code <} relation does not provide a total order on all float
-     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
-     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
-     * other value and all {@code Float.NaN} values are considered equal.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(float[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(float[]) Arrays.sort} method. The algorithm requires a
-     * working space no greater than the size of the original array. The
-     * {@link ForkJoinPool#commonPool() ForkJoin common pool} is used to
-     * execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(float[] a) {
-        int n = a.length, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, 0, n - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJFloat.Sorter
-                (null, a, new float[n], 0, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending numerical order.
-     * The range to be sorted extends from the index {@code fromIndex},
-     * inclusive, to the index {@code toIndex}, exclusive. If
-     * {@code fromIndex == toIndex}, the range to be sorted is empty.
-     *
-     * <p>The {@code <} relation does not provide a total order on all float
-     * values: {@code -0.0f == 0.0f} is {@code true} and a {@code Float.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Float#compareTo}: {@code -0.0f} is treated as less than value
-     * {@code 0.0f} and {@code Float.NaN} is considered greater than any
-     * other value and all {@code Float.NaN} values are considered equal.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(float[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(float[]) Arrays.sort} method. The algorithm requires a working
-     * space no greater than the size of the specified range of the original
-     * array. The {@link ForkJoinPool#commonPool() ForkJoin common pool} is
-     * used to execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(float[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        int n = toIndex - fromIndex, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJFloat.Sorter
-                (null, a, new float[n], fromIndex, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified array into ascending numerical order.
-     *
-     * <p>The {@code <} relation does not provide a total order on all double
-     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
-     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
-     * other value and all {@code Double.NaN} values are considered equal.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(double[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(double[]) Arrays.sort} method. The algorithm requires a
-     * working space no greater than the size of the original array. The
-     * {@link ForkJoinPool#commonPool() ForkJoin common pool} is used to
-     * execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(double[] a) {
-        int n = a.length, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, 0, n - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJDouble.Sorter
-                (null, a, new double[n], 0, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
-
-    /**
-     * Sorts the specified range of the array into ascending numerical order.
-     * The range to be sorted extends from the index {@code fromIndex},
-     * inclusive, to the index {@code toIndex}, exclusive. If
-     * {@code fromIndex == toIndex}, the range to be sorted is empty.
-     *
-     * <p>The {@code <} relation does not provide a total order on all double
-     * values: {@code -0.0d == 0.0d} is {@code true} and a {@code Double.NaN}
-     * value compares neither less than, greater than, nor equal to any value,
-     * even itself. This method uses the total order imposed by the method
-     * {@link Double#compareTo}: {@code -0.0d} is treated as less than value
-     * {@code 0.0d} and {@code Double.NaN} is considered greater than any
-     * other value and all {@code Double.NaN} values are considered equal.
-     *
-     * @implNote The sorting algorithm is a parallel sort-merge that breaks the
-     * array into sub-arrays that are themselves sorted and then merged. When
-     * the sub-array length reaches a minimum granularity, the sub-array is
-     * sorted using the appropriate {@link Arrays#sort(double[]) Arrays.sort}
-     * method. If the length of the specified array is less than the minimum
-     * granularity, then it is sorted using the appropriate {@link
-     * Arrays#sort(double[]) Arrays.sort} method. The algorithm requires a working
-     * space no greater than the size of the specified range of the original
-     * array. The {@link ForkJoinPool#commonPool() ForkJoin common pool} is
-     * used to execute any parallel tasks.
-     *
-     * @param a the array to be sorted
-     * @param fromIndex the index of the first element, inclusive, to be sorted
-     * @param toIndex the index of the last element, exclusive, to be sorted
-     *
-     * @throws IllegalArgumentException if {@code fromIndex > toIndex}
-     * @throws ArrayIndexOutOfBoundsException
-     *     if {@code fromIndex < 0} or {@code toIndex > a.length}
-     *
-     * @since 1.8
-     */
-    public static void parallelSort(double[] a, int fromIndex, int toIndex) {
-        rangeCheck(a.length, fromIndex, toIndex);
-        int n = toIndex - fromIndex, p, g;
-        if (n <= MIN_ARRAY_SORT_GRAN ||
-            (p = ForkJoinPool.getCommonPoolParallelism()) == 1)
-            DualPivotQuicksort.sort(a, fromIndex, toIndex - 1, null, 0, 0);
-        else
-            new ArraysParallelSortHelpers.FJDouble.Sorter
-                (null, a, new double[n], fromIndex, n, 0,
-                 ((g = n / (p << 2)) <= MIN_ARRAY_SORT_GRAN) ?
-                 MIN_ARRAY_SORT_GRAN : g).invoke();
-    }
+     * The minimum array length below which a parallel sorting
+     * algorithm will not further partition the sorting task. Using
+     * smaller sizes typically results in memory contention across
+     * tasks that makes parallel speedups unlikely.
+     */
+    private static final int MIN_ARRAY_SORT_GRAN = 1 << 13;
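
The constant survives the change, presumably for the object-array parallel sorts that this patch leaves in place. As a worked example of the clamped leaf-size formula visible in the removed primitive helpers (g = n / (4p), floored at 1 << 13; the numbers are illustrative):

    class GranularityDemo {
        static final int MIN_ARRAY_SORT_GRAN = 1 << 13; // 8192, as above

        // Leaf task size from the removed primitive helpers: n / (4 * p),
        // never below the minimum granularity.
        static int leafSize(int n, int p) {
            int g = n / (p << 2);
            return (g <= MIN_ARRAY_SORT_GRAN) ? MIN_ARRAY_SORT_GRAN : g;
        }

        public static void main(String[] args) {
            System.out.println(leafSize(1_000_000, 8)); // 31250
            System.out.println(leafSize(100_000, 8));   // 8192 (clamped)
        }
    }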
 
     /**
      * Sorts the specified array of objects into ascending order, according
@@ -2599,7 +2389,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -2671,7 +2461,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -2743,7 +2533,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -2816,7 +2606,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -2889,7 +2679,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -2961,7 +2751,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -3044,7 +2834,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -3127,7 +2917,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -3210,7 +3000,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
@@ -3303,7 +3093,7 @@
      *                   first array to be tested
      * @param aToIndex the index (exclusive) of the last element in the
      *                 first array to be tested
-     * @param b the second array to be tested fro equality
+     * @param b the second array to be tested for equality
      * @param bFromIndex the index (inclusive) of the first element in the
      *                   second array to be tested
      * @param bToIndex the index (exclusive) of the last element in the
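
The hunks above all correct the same "fro equality" typo in the javadoc of the range-based Arrays.equals overloads. For readers unfamiliar with these overloads (added in Java 9), a small usage sketch against the int[] variant, with illustrative values:

    import java.util.Arrays;

    public class RangeEqualsDemo {
        public static void main(String[] args) {
            int[] a = {9, 1, 2, 3, 7};
            int[] b = {1, 2, 3};
            // Compares a[1..4) with b[0..3); both ranges contain {1, 2, 3}.
            System.out.println(Arrays.equals(a, 1, 4, b, 0, 3)); // true
        }
    }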
--- a/src/java.base/share/classes/java/util/ArraysParallelSortHelpers.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/java/util/ArraysParallelSortHelpers.java	Thu Nov 14 13:50:03 2019 +0000
@@ -24,7 +24,6 @@
  */
 package java.util;
 
-import java.util.concurrent.RecursiveAction;
 import java.util.concurrent.CountedCompleter;
 
 /**
@@ -36,7 +35,7 @@
  * Sorter classes based mainly on CilkSort
  * <A href="http://supertech.lcs.mit.edu/cilk/"> Cilk</A>:
  * Basic algorithm:
- * if array size is small, just use a sequential quicksort (via Arrays.sort)
+ * if array size is small, just use a sequential sort (via Arrays.sort)
  *         Otherwise:
  *         1. Break array in half.
  *         2. For each half,
@@ -63,14 +62,10 @@
  * need to keep track of the arrays, and are never themselves forked,
  * so don't hold any task state.
  *
- * The primitive class versions (FJByte... FJDouble) are
- * identical to each other except for type declarations.
- *
  * The base sequential sorts rely on non-public versions of TimSort,
- * ComparableTimSort, and DualPivotQuicksort sort methods that accept
- * temp workspace array slices that we will have already allocated, so
- * avoids redundant allocation. (Except for DualPivotQuicksort byte[]
- * sort, that does not ever use a workspace array.)
+ * ComparableTimSort sort methods that accept temp workspace array
+ * slices that we will have already allocated, which avoids redundant
+ * allocation.
  */
 /*package*/ class ArraysParallelSortHelpers {
 
@@ -142,7 +137,7 @@
                     Relay rc = new Relay(new Merger<>(fc, a, w, b+h, q,
                                                       b+u, n-u, wb+h, g, c));
                     new Sorter<>(rc, a, w, b+u, n-u, wb+u, g, c).fork();
-                    new Sorter<>(rc, a, w, b+h, q, wb+h, g, c).fork();;
+                    new Sorter<>(rc, a, w, b+h, q, wb+h, g, c).fork();
                     Relay bc = new Relay(new Merger<>(fc, a, w, b, q,
                                                       b+q, h-q, wb, g, c));
                     new Sorter<>(bc, a, w, b+q, h-q, wb+q, g, c).fork();
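
The class comment above describes the CilkSort scheme that Sorter realizes with forked CountedCompleter tasks split into quartiles. As a rough sequential rendering of the same recursion (class name and threshold are illustrative; the real code forks tasks rather than recursing directly):

    import java.util.Arrays;

    public class CilkSortSketch {
        static final int THRESHOLD = 1 << 13; // analogous to the min granularity

        // Sorts a[lo, hi) in place, using w as the merge workspace.
        static void sort(int[] a, int[] w, int lo, int hi) {
            if (hi - lo <= THRESHOLD) {
                Arrays.sort(a, lo, hi);   // small: just use a sequential sort
                return;
            }
            int mid = (lo + hi) >>> 1;
            sort(a, w, lo, mid);          // sort each half...
            sort(a, w, mid, hi);
            merge(a, w, lo, mid, hi);     // ...then merge through the workspace
        }

        static void merge(int[] a, int[] w, int lo, int mid, int hi) {
            int i = lo, j = mid, k = lo;
            while (i < mid && j < hi)
                w[k++] = (a[i] <= a[j]) ? a[i++] : a[j++];
            while (i < mid) w[k++] = a[i++];
            while (j < hi)  w[k++] = a[j++];
            System.arraycopy(w, lo, a, lo, hi - lo);
        }
    }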
@@ -239,799 +234,6 @@
 
                 tryComplete();
             }
-
         }
-    } // FJObject
-
-    /** byte support class */
-    static final class FJByte {
-        static final class Sorter extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final byte[] a, w;
-            final int base, size, wbase, gran;
-            Sorter(CountedCompleter<?> par, byte[] a, byte[] w, int base,
-                   int size, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w; this.base = base; this.size = size;
-                this.wbase = wbase; this.gran = gran;
-            }
-            public final void compute() {
-                CountedCompleter<?> s = this;
-                byte[] a = this.a, w = this.w; // localize all params
-                int b = this.base, n = this.size, wb = this.wbase, g = this.gran;
-                while (n > g) {
-                    int h = n >>> 1, q = h >>> 1, u = h + q; // quartiles
-                    Relay fc = new Relay(new Merger(s, w, a, wb, h,
-                                                    wb+h, n-h, b, g));
-                    Relay rc = new Relay(new Merger(fc, a, w, b+h, q,
-                                                    b+u, n-u, wb+h, g));
-                    new Sorter(rc, a, w, b+u, n-u, wb+u, g).fork();
-                    new Sorter(rc, a, w, b+h, q, wb+h, g).fork();;
-                    Relay bc = new Relay(new Merger(fc, a, w, b, q,
-                                                    b+q, h-q, wb, g));
-                    new Sorter(bc, a, w, b+q, h-q, wb+q, g).fork();
-                    s = new EmptyCompleter(bc);
-                    n = q;
-                }
-                DualPivotQuicksort.sort(a, b, b + n - 1);
-                s.tryComplete();
-            }
-        }
-
-        static final class Merger extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final byte[] a, w; // main and workspace arrays
-            final int lbase, lsize, rbase, rsize, wbase, gran;
-            Merger(CountedCompleter<?> par, byte[] a, byte[] w,
-                   int lbase, int lsize, int rbase,
-                   int rsize, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w;
-                this.lbase = lbase; this.lsize = lsize;
-                this.rbase = rbase; this.rsize = rsize;
-                this.wbase = wbase; this.gran = gran;
-            }
-
-            public final void compute() {
-                byte[] a = this.a, w = this.w; // localize all params
-                int lb = this.lbase, ln = this.lsize, rb = this.rbase,
-                    rn = this.rsize, k = this.wbase, g = this.gran;
-                if (a == null || w == null || lb < 0 || rb < 0 || k < 0)
-                    throw new IllegalStateException(); // hoist checks
-                for (int lh, rh;;) {  // split larger, find point in smaller
-                    if (ln >= rn) {
-                        if (ln <= g)
-                            break;
-                        rh = rn;
-                        byte split = a[(lh = ln >>> 1) + lb];
-                        for (int lo = 0; lo < rh; ) {
-                            int rm = (lo + rh) >>> 1;
-                            if (split <= a[rm + rb])
-                                rh = rm;
-                            else
-                                lo = rm + 1;
-                        }
-                    }
-                    else {
-                        if (rn <= g)
-                            break;
-                        lh = ln;
-                        byte split = a[(rh = rn >>> 1) + rb];
-                        for (int lo = 0; lo < lh; ) {
-                            int lm = (lo + lh) >>> 1;
-                            if (split <= a[lm + lb])
-                                lh = lm;
-                            else
-                                lo = lm + 1;
-                        }
-                    }
-                    Merger m = new Merger(this, a, w, lb + lh, ln - lh,
-                                          rb + rh, rn - rh,
-                                          k + lh + rh, g);
-                    rn = rh;
-                    ln = lh;
-                    addToPendingCount(1);
-                    m.fork();
-                }
-
-                int lf = lb + ln, rf = rb + rn; // index bounds
-                while (lb < lf && rb < rf) {
-                    byte t, al, ar;
-                    if ((al = a[lb]) <= (ar = a[rb])) {
-                        lb++; t = al;
-                    }
-                    else {
-                        rb++; t = ar;
-                    }
-                    w[k++] = t;
-                }
-                if (rb < rf)
-                    System.arraycopy(a, rb, w, k, rf - rb);
-                else if (lb < lf)
-                    System.arraycopy(a, lb, w, k, lf - lb);
-                tryComplete();
-            }
-        }
-    } // FJByte
-
-    /** char support class */
-    static final class FJChar {
-        static final class Sorter extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final char[] a, w;
-            final int base, size, wbase, gran;
-            Sorter(CountedCompleter<?> par, char[] a, char[] w, int base,
-                   int size, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w; this.base = base; this.size = size;
-                this.wbase = wbase; this.gran = gran;
-            }
-            public final void compute() {
-                CountedCompleter<?> s = this;
-                char[] a = this.a, w = this.w; // localize all params
-                int b = this.base, n = this.size, wb = this.wbase, g = this.gran;
-                while (n > g) {
-                    int h = n >>> 1, q = h >>> 1, u = h + q; // quartiles
-                    Relay fc = new Relay(new Merger(s, w, a, wb, h,
-                                                    wb+h, n-h, b, g));
-                    Relay rc = new Relay(new Merger(fc, a, w, b+h, q,
-                                                    b+u, n-u, wb+h, g));
-                    new Sorter(rc, a, w, b+u, n-u, wb+u, g).fork();
-                    new Sorter(rc, a, w, b+h, q, wb+h, g).fork();;
-                    Relay bc = new Relay(new Merger(fc, a, w, b, q,
-                                                    b+q, h-q, wb, g));
-                    new Sorter(bc, a, w, b+q, h-q, wb+q, g).fork();
-                    s = new EmptyCompleter(bc);
-                    n = q;
-                }
-                DualPivotQuicksort.sort(a, b, b + n - 1, w, wb, n);
-                s.tryComplete();
-            }
-        }
-
-        static final class Merger extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final char[] a, w; // main and workspace arrays
-            final int lbase, lsize, rbase, rsize, wbase, gran;
-            Merger(CountedCompleter<?> par, char[] a, char[] w,
-                   int lbase, int lsize, int rbase,
-                   int rsize, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w;
-                this.lbase = lbase; this.lsize = lsize;
-                this.rbase = rbase; this.rsize = rsize;
-                this.wbase = wbase; this.gran = gran;
-            }
-
-            public final void compute() {
-                char[] a = this.a, w = this.w; // localize all params
-                int lb = this.lbase, ln = this.lsize, rb = this.rbase,
-                    rn = this.rsize, k = this.wbase, g = this.gran;
-                if (a == null || w == null || lb < 0 || rb < 0 || k < 0)
-                    throw new IllegalStateException(); // hoist checks
-                for (int lh, rh;;) {  // split larger, find point in smaller
-                    if (ln >= rn) {
-                        if (ln <= g)
-                            break;
-                        rh = rn;
-                        char split = a[(lh = ln >>> 1) + lb];
-                        for (int lo = 0; lo < rh; ) {
-                            int rm = (lo + rh) >>> 1;
-                            if (split <= a[rm + rb])
-                                rh = rm;
-                            else
-                                lo = rm + 1;
-                        }
-                    }
-                    else {
-                        if (rn <= g)
-                            break;
-                        lh = ln;
-                        char split = a[(rh = rn >>> 1) + rb];
-                        for (int lo = 0; lo < lh; ) {
-                            int lm = (lo + lh) >>> 1;
-                            if (split <= a[lm + lb])
-                                lh = lm;
-                            else
-                                lo = lm + 1;
-                        }
-                    }
-                    Merger m = new Merger(this, a, w, lb + lh, ln - lh,
-                                          rb + rh, rn - rh,
-                                          k + lh + rh, g);
-                    rn = rh;
-                    ln = lh;
-                    addToPendingCount(1);
-                    m.fork();
-                }
-
-                int lf = lb + ln, rf = rb + rn; // index bounds
-                while (lb < lf && rb < rf) {
-                    char t, al, ar;
-                    if ((al = a[lb]) <= (ar = a[rb])) {
-                        lb++; t = al;
-                    }
-                    else {
-                        rb++; t = ar;
-                    }
-                    w[k++] = t;
-                }
-                if (rb < rf)
-                    System.arraycopy(a, rb, w, k, rf - rb);
-                else if (lb < lf)
-                    System.arraycopy(a, lb, w, k, lf - lb);
-                tryComplete();
-            }
-        }
-    } // FJChar
-
-    /** short support class */
-    static final class FJShort {
-        static final class Sorter extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final short[] a, w;
-            final int base, size, wbase, gran;
-            Sorter(CountedCompleter<?> par, short[] a, short[] w, int base,
-                   int size, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w; this.base = base; this.size = size;
-                this.wbase = wbase; this.gran = gran;
-            }
-            public final void compute() {
-                CountedCompleter<?> s = this;
-                short[] a = this.a, w = this.w; // localize all params
-                int b = this.base, n = this.size, wb = this.wbase, g = this.gran;
-                while (n > g) {
-                    int h = n >>> 1, q = h >>> 1, u = h + q; // quartiles
-                    Relay fc = new Relay(new Merger(s, w, a, wb, h,
-                                                    wb+h, n-h, b, g));
-                    Relay rc = new Relay(new Merger(fc, a, w, b+h, q,
-                                                    b+u, n-u, wb+h, g));
-                    new Sorter(rc, a, w, b+u, n-u, wb+u, g).fork();
-                    new Sorter(rc, a, w, b+h, q, wb+h, g).fork();;
-                    Relay bc = new Relay(new Merger(fc, a, w, b, q,
-                                                    b+q, h-q, wb, g));
-                    new Sorter(bc, a, w, b+q, h-q, wb+q, g).fork();
-                    s = new EmptyCompleter(bc);
-                    n = q;
-                }
-                DualPivotQuicksort.sort(a, b, b + n - 1, w, wb, n);
-                s.tryComplete();
-            }
-        }
-
-        static final class Merger extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final short[] a, w; // main and workspace arrays
-            final int lbase, lsize, rbase, rsize, wbase, gran;
-            Merger(CountedCompleter<?> par, short[] a, short[] w,
-                   int lbase, int lsize, int rbase,
-                   int rsize, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w;
-                this.lbase = lbase; this.lsize = lsize;
-                this.rbase = rbase; this.rsize = rsize;
-                this.wbase = wbase; this.gran = gran;
-            }
-
-            public final void compute() {
-                short[] a = this.a, w = this.w; // localize all params
-                int lb = this.lbase, ln = this.lsize, rb = this.rbase,
-                    rn = this.rsize, k = this.wbase, g = this.gran;
-                if (a == null || w == null || lb < 0 || rb < 0 || k < 0)
-                    throw new IllegalStateException(); // hoist checks
-                for (int lh, rh;;) {  // split larger, find point in smaller
-                    if (ln >= rn) {
-                        if (ln <= g)
-                            break;
-                        rh = rn;
-                        short split = a[(lh = ln >>> 1) + lb];
-                        for (int lo = 0; lo < rh; ) {
-                            int rm = (lo + rh) >>> 1;
-                            if (split <= a[rm + rb])
-                                rh = rm;
-                            else
-                                lo = rm + 1;
-                        }
-                    }
-                    else {
-                        if (rn <= g)
-                            break;
-                        lh = ln;
-                        short split = a[(rh = rn >>> 1) + rb];
-                        for (int lo = 0; lo < lh; ) {
-                            int lm = (lo + lh) >>> 1;
-                            if (split <= a[lm + lb])
-                                lh = lm;
-                            else
-                                lo = lm + 1;
-                        }
-                    }
-                    Merger m = new Merger(this, a, w, lb + lh, ln - lh,
-                                          rb + rh, rn - rh,
-                                          k + lh + rh, g);
-                    rn = rh;
-                    ln = lh;
-                    addToPendingCount(1);
-                    m.fork();
-                }
-
-                int lf = lb + ln, rf = rb + rn; // index bounds
-                while (lb < lf && rb < rf) {
-                    short t, al, ar;
-                    if ((al = a[lb]) <= (ar = a[rb])) {
-                        lb++; t = al;
-                    }
-                    else {
-                        rb++; t = ar;
-                    }
-                    w[k++] = t;
-                }
-                if (rb < rf)
-                    System.arraycopy(a, rb, w, k, rf - rb);
-                else if (lb < lf)
-                    System.arraycopy(a, lb, w, k, lf - lb);
-                tryComplete();
-            }
-        }
-    } // FJShort
-
-    /** int support class */
-    static final class FJInt {
-        static final class Sorter extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final int[] a, w;
-            final int base, size, wbase, gran;
-            Sorter(CountedCompleter<?> par, int[] a, int[] w, int base,
-                   int size, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w; this.base = base; this.size = size;
-                this.wbase = wbase; this.gran = gran;
-            }
-            public final void compute() {
-                CountedCompleter<?> s = this;
-                int[] a = this.a, w = this.w; // localize all params
-                int b = this.base, n = this.size, wb = this.wbase, g = this.gran;
-                while (n > g) {
-                    int h = n >>> 1, q = h >>> 1, u = h + q; // quartiles
-                    Relay fc = new Relay(new Merger(s, w, a, wb, h,
-                                                    wb+h, n-h, b, g));
-                    Relay rc = new Relay(new Merger(fc, a, w, b+h, q,
-                                                    b+u, n-u, wb+h, g));
-                    new Sorter(rc, a, w, b+u, n-u, wb+u, g).fork();
-                    new Sorter(rc, a, w, b+h, q, wb+h, g).fork();;
-                    Relay bc = new Relay(new Merger(fc, a, w, b, q,
-                                                    b+q, h-q, wb, g));
-                    new Sorter(bc, a, w, b+q, h-q, wb+q, g).fork();
-                    s = new EmptyCompleter(bc);
-                    n = q;
-                }
-                DualPivotQuicksort.sort(a, b, b + n - 1, w, wb, n);
-                s.tryComplete();
-            }
-        }
-
-        static final class Merger extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final int[] a, w; // main and workspace arrays
-            final int lbase, lsize, rbase, rsize, wbase, gran;
-            Merger(CountedCompleter<?> par, int[] a, int[] w,
-                   int lbase, int lsize, int rbase,
-                   int rsize, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w;
-                this.lbase = lbase; this.lsize = lsize;
-                this.rbase = rbase; this.rsize = rsize;
-                this.wbase = wbase; this.gran = gran;
-            }
-
-            public final void compute() {
-                int[] a = this.a, w = this.w; // localize all params
-                int lb = this.lbase, ln = this.lsize, rb = this.rbase,
-                    rn = this.rsize, k = this.wbase, g = this.gran;
-                if (a == null || w == null || lb < 0 || rb < 0 || k < 0)
-                    throw new IllegalStateException(); // hoist checks
-                for (int lh, rh;;) {  // split larger, find point in smaller
-                    if (ln >= rn) {
-                        if (ln <= g)
-                            break;
-                        rh = rn;
-                        int split = a[(lh = ln >>> 1) + lb];
-                        for (int lo = 0; lo < rh; ) {
-                            int rm = (lo + rh) >>> 1;
-                            if (split <= a[rm + rb])
-                                rh = rm;
-                            else
-                                lo = rm + 1;
-                        }
-                    }
-                    else {
-                        if (rn <= g)
-                            break;
-                        lh = ln;
-                        int split = a[(rh = rn >>> 1) + rb];
-                        for (int lo = 0; lo < lh; ) {
-                            int lm = (lo + lh) >>> 1;
-                            if (split <= a[lm + lb])
-                                lh = lm;
-                            else
-                                lo = lm + 1;
-                        }
-                    }
-                    Merger m = new Merger(this, a, w, lb + lh, ln - lh,
-                                          rb + rh, rn - rh,
-                                          k + lh + rh, g);
-                    rn = rh;
-                    ln = lh;
-                    addToPendingCount(1);
-                    m.fork();
-                }
-
-                int lf = lb + ln, rf = rb + rn; // index bounds
-                while (lb < lf && rb < rf) {
-                    int t, al, ar;
-                    if ((al = a[lb]) <= (ar = a[rb])) {
-                        lb++; t = al;
-                    }
-                    else {
-                        rb++; t = ar;
-                    }
-                    w[k++] = t;
-                }
-                if (rb < rf)
-                    System.arraycopy(a, rb, w, k, rf - rb);
-                else if (lb < lf)
-                    System.arraycopy(a, lb, w, k, lf - lb);
-                tryComplete();
-            }
-        }
-    } // FJInt
-
-    /** long support class */
-    static final class FJLong {
-        static final class Sorter extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final long[] a, w;
-            final int base, size, wbase, gran;
-            Sorter(CountedCompleter<?> par, long[] a, long[] w, int base,
-                   int size, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w; this.base = base; this.size = size;
-                this.wbase = wbase; this.gran = gran;
-            }
-            public final void compute() {
-                CountedCompleter<?> s = this;
-                long[] a = this.a, w = this.w; // localize all params
-                int b = this.base, n = this.size, wb = this.wbase, g = this.gran;
-                while (n > g) {
-                    int h = n >>> 1, q = h >>> 1, u = h + q; // quartiles
-                    Relay fc = new Relay(new Merger(s, w, a, wb, h,
-                                                    wb+h, n-h, b, g));
-                    Relay rc = new Relay(new Merger(fc, a, w, b+h, q,
-                                                    b+u, n-u, wb+h, g));
-                    new Sorter(rc, a, w, b+u, n-u, wb+u, g).fork();
-                    new Sorter(rc, a, w, b+h, q, wb+h, g).fork();;
-                    Relay bc = new Relay(new Merger(fc, a, w, b, q,
-                                                    b+q, h-q, wb, g));
-                    new Sorter(bc, a, w, b+q, h-q, wb+q, g).fork();
-                    s = new EmptyCompleter(bc);
-                    n = q;
-                }
-                DualPivotQuicksort.sort(a, b, b + n - 1, w, wb, n);
-                s.tryComplete();
-            }
-        }
-
-        static final class Merger extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final long[] a, w; // main and workspace arrays
-            final int lbase, lsize, rbase, rsize, wbase, gran;
-            Merger(CountedCompleter<?> par, long[] a, long[] w,
-                   int lbase, int lsize, int rbase,
-                   int rsize, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w;
-                this.lbase = lbase; this.lsize = lsize;
-                this.rbase = rbase; this.rsize = rsize;
-                this.wbase = wbase; this.gran = gran;
-            }
-
-            public final void compute() {
-                long[] a = this.a, w = this.w; // localize all params
-                int lb = this.lbase, ln = this.lsize, rb = this.rbase,
-                    rn = this.rsize, k = this.wbase, g = this.gran;
-                if (a == null || w == null || lb < 0 || rb < 0 || k < 0)
-                    throw new IllegalStateException(); // hoist checks
-                for (int lh, rh;;) {  // split larger, find point in smaller
-                    if (ln >= rn) {
-                        if (ln <= g)
-                            break;
-                        rh = rn;
-                        long split = a[(lh = ln >>> 1) + lb];
-                        for (int lo = 0; lo < rh; ) {
-                            int rm = (lo + rh) >>> 1;
-                            if (split <= a[rm + rb])
-                                rh = rm;
-                            else
-                                lo = rm + 1;
-                        }
-                    }
-                    else {
-                        if (rn <= g)
-                            break;
-                        lh = ln;
-                        long split = a[(rh = rn >>> 1) + rb];
-                        for (int lo = 0; lo < lh; ) {
-                            int lm = (lo + lh) >>> 1;
-                            if (split <= a[lm + lb])
-                                lh = lm;
-                            else
-                                lo = lm + 1;
-                        }
-                    }
-                    Merger m = new Merger(this, a, w, lb + lh, ln - lh,
-                                          rb + rh, rn - rh,
-                                          k + lh + rh, g);
-                    rn = rh;
-                    ln = lh;
-                    addToPendingCount(1);
-                    m.fork();
-                }
-
-                int lf = lb + ln, rf = rb + rn; // index bounds
-                while (lb < lf && rb < rf) {
-                    long t, al, ar;
-                    if ((al = a[lb]) <= (ar = a[rb])) {
-                        lb++; t = al;
-                    }
-                    else {
-                        rb++; t = ar;
-                    }
-                    w[k++] = t;
-                }
-                if (rb < rf)
-                    System.arraycopy(a, rb, w, k, rf - rb);
-                else if (lb < lf)
-                    System.arraycopy(a, lb, w, k, lf - lb);
-                tryComplete();
-            }
-        }
-    } // FJLong
-
-    /** float support class */
-    static final class FJFloat {
-        static final class Sorter extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final float[] a, w;
-            final int base, size, wbase, gran;
-            Sorter(CountedCompleter<?> par, float[] a, float[] w, int base,
-                   int size, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w; this.base = base; this.size = size;
-                this.wbase = wbase; this.gran = gran;
-            }
-            public final void compute() {
-                CountedCompleter<?> s = this;
-                float[] a = this.a, w = this.w; // localize all params
-                int b = this.base, n = this.size, wb = this.wbase, g = this.gran;
-                while (n > g) {
-                    int h = n >>> 1, q = h >>> 1, u = h + q; // quartiles
-                    Relay fc = new Relay(new Merger(s, w, a, wb, h,
-                                                    wb+h, n-h, b, g));
-                    Relay rc = new Relay(new Merger(fc, a, w, b+h, q,
-                                                    b+u, n-u, wb+h, g));
-                    new Sorter(rc, a, w, b+u, n-u, wb+u, g).fork();
-                    new Sorter(rc, a, w, b+h, q, wb+h, g).fork();;
-                    Relay bc = new Relay(new Merger(fc, a, w, b, q,
-                                                    b+q, h-q, wb, g));
-                    new Sorter(bc, a, w, b+q, h-q, wb+q, g).fork();
-                    s = new EmptyCompleter(bc);
-                    n = q;
-                }
-                DualPivotQuicksort.sort(a, b, b + n - 1, w, wb, n);
-                s.tryComplete();
-            }
-        }
-
-        static final class Merger extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final float[] a, w; // main and workspace arrays
-            final int lbase, lsize, rbase, rsize, wbase, gran;
-            Merger(CountedCompleter<?> par, float[] a, float[] w,
-                   int lbase, int lsize, int rbase,
-                   int rsize, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w;
-                this.lbase = lbase; this.lsize = lsize;
-                this.rbase = rbase; this.rsize = rsize;
-                this.wbase = wbase; this.gran = gran;
-            }
-
-            public final void compute() {
-                float[] a = this.a, w = this.w; // localize all params
-                int lb = this.lbase, ln = this.lsize, rb = this.rbase,
-                    rn = this.rsize, k = this.wbase, g = this.gran;
-                if (a == null || w == null || lb < 0 || rb < 0 || k < 0)
-                    throw new IllegalStateException(); // hoist checks
-                for (int lh, rh;;) {  // split larger, find point in smaller
-                    if (ln >= rn) {
-                        if (ln <= g)
-                            break;
-                        rh = rn;
-                        float split = a[(lh = ln >>> 1) + lb];
-                        for (int lo = 0; lo < rh; ) {
-                            int rm = (lo + rh) >>> 1;
-                            if (split <= a[rm + rb])
-                                rh = rm;
-                            else
-                                lo = rm + 1;
-                        }
-                    }
-                    else {
-                        if (rn <= g)
-                            break;
-                        lh = ln;
-                        float split = a[(rh = rn >>> 1) + rb];
-                        for (int lo = 0; lo < lh; ) {
-                            int lm = (lo + lh) >>> 1;
-                            if (split <= a[lm + lb])
-                                lh = lm;
-                            else
-                                lo = lm + 1;
-                        }
-                    }
-                    Merger m = new Merger(this, a, w, lb + lh, ln - lh,
-                                          rb + rh, rn - rh,
-                                          k + lh + rh, g);
-                    rn = rh;
-                    ln = lh;
-                    addToPendingCount(1);
-                    m.fork();
-                }
-
-                int lf = lb + ln, rf = rb + rn; // index bounds
-                while (lb < lf && rb < rf) {
-                    float t, al, ar;
-                    if ((al = a[lb]) <= (ar = a[rb])) {
-                        lb++; t = al;
-                    }
-                    else {
-                        rb++; t = ar;
-                    }
-                    w[k++] = t;
-                }
-                if (rb < rf)
-                    System.arraycopy(a, rb, w, k, rf - rb);
-                else if (lb < lf)
-                    System.arraycopy(a, lb, w, k, lf - lb);
-                tryComplete();
-            }
-        }
-    } // FJFloat
-
-    /** double support class */
-    static final class FJDouble {
-        static final class Sorter extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final double[] a, w;
-            final int base, size, wbase, gran;
-            Sorter(CountedCompleter<?> par, double[] a, double[] w, int base,
-                   int size, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w; this.base = base; this.size = size;
-                this.wbase = wbase; this.gran = gran;
-            }
-            public final void compute() {
-                CountedCompleter<?> s = this;
-                double[] a = this.a, w = this.w; // localize all params
-                int b = this.base, n = this.size, wb = this.wbase, g = this.gran;
-                while (n > g) {
-                    int h = n >>> 1, q = h >>> 1, u = h + q; // quartiles
-                    Relay fc = new Relay(new Merger(s, w, a, wb, h,
-                                                    wb+h, n-h, b, g));
-                    Relay rc = new Relay(new Merger(fc, a, w, b+h, q,
-                                                    b+u, n-u, wb+h, g));
-                    new Sorter(rc, a, w, b+u, n-u, wb+u, g).fork();
-                    new Sorter(rc, a, w, b+h, q, wb+h, g).fork();;
-                    Relay bc = new Relay(new Merger(fc, a, w, b, q,
-                                                    b+q, h-q, wb, g));
-                    new Sorter(bc, a, w, b+q, h-q, wb+q, g).fork();
-                    s = new EmptyCompleter(bc);
-                    n = q;
-                }
-                DualPivotQuicksort.sort(a, b, b + n - 1, w, wb, n);
-                s.tryComplete();
-            }
-        }
-
-        static final class Merger extends CountedCompleter<Void> {
-            @java.io.Serial
-            static final long serialVersionUID = 2446542900576103244L;
-            final double[] a, w; // main and workspace arrays
-            final int lbase, lsize, rbase, rsize, wbase, gran;
-            Merger(CountedCompleter<?> par, double[] a, double[] w,
-                   int lbase, int lsize, int rbase,
-                   int rsize, int wbase, int gran) {
-                super(par);
-                this.a = a; this.w = w;
-                this.lbase = lbase; this.lsize = lsize;
-                this.rbase = rbase; this.rsize = rsize;
-                this.wbase = wbase; this.gran = gran;
-            }
-
-            public final void compute() {
-                double[] a = this.a, w = this.w; // localize all params
-                int lb = this.lbase, ln = this.lsize, rb = this.rbase,
-                    rn = this.rsize, k = this.wbase, g = this.gran;
-                if (a == null || w == null || lb < 0 || rb < 0 || k < 0)
-                    throw new IllegalStateException(); // hoist checks
-                for (int lh, rh;;) {  // split larger, find point in smaller
-                    if (ln >= rn) {
-                        if (ln <= g)
-                            break;
-                        rh = rn;
-                        double split = a[(lh = ln >>> 1) + lb];
-                        for (int lo = 0; lo < rh; ) {
-                            int rm = (lo + rh) >>> 1;
-                            if (split <= a[rm + rb])
-                                rh = rm;
-                            else
-                                lo = rm + 1;
-                        }
-                    }
-                    else {
-                        if (rn <= g)
-                            break;
-                        lh = ln;
-                        double split = a[(rh = rn >>> 1) + rb];
-                        for (int lo = 0; lo < lh; ) {
-                            int lm = (lo + lh) >>> 1;
-                            if (split <= a[lm + lb])
-                                lh = lm;
-                            else
-                                lo = lm + 1;
-                        }
-                    }
-                    Merger m = new Merger(this, a, w, lb + lh, ln - lh,
-                                          rb + rh, rn - rh,
-                                          k + lh + rh, g);
-                    rn = rh;
-                    ln = lh;
-                    addToPendingCount(1);
-                    m.fork();
-                }
-
-                int lf = lb + ln, rf = rb + rn; // index bounds
-                while (lb < lf && rb < rf) {
-                    double t, al, ar;
-                    if ((al = a[lb]) <= (ar = a[rb])) {
-                        lb++; t = al;
-                    }
-                    else {
-                        rb++; t = ar;
-                    }
-                    w[k++] = t;
-                }
-                if (rb < rf)
-                    System.arraycopy(a, rb, w, k, rf - rb);
-                else if (lb < lf)
-                    System.arraycopy(a, lb, w, k, lf - lb);
-                tryComplete();
-            }
-        }
-    } // FJDouble
-
+    }
 }
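
With the FJByte..FJDouble support classes deleted, only the generic FJObject machinery remains here; parallel sorting of primitive arrays is now driven by DualPivotQuicksort itself (next file). The public entry point is unchanged, e.g.:

    import java.util.Arrays;
    import java.util.Random;

    public class ParallelSortDemo {
        public static void main(String[] args) {
            int[] data = new Random(42).ints(1 << 20).toArray();
            Arrays.parallelSort(data); // primitive case now handled in DualPivotQuicksort
            System.out.println("sorted: " + (data[0] <= data[data.length - 1]));
        }
    }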
--- a/src/java.base/share/classes/java/util/DualPivotQuicksort.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/java/util/DualPivotQuicksort.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,24 +25,28 @@
 
 package java.util;
 
+import java.util.concurrent.CountedCompleter;
+import java.util.concurrent.RecursiveTask;
+
 /**
- * This class implements the Dual-Pivot Quicksort algorithm by
- * Vladimir Yaroslavskiy, Jon Bentley, and Josh Bloch. The algorithm
- * offers O(n log(n)) performance on many data sets that cause other
- * quicksorts to degrade to quadratic performance, and is typically
+ * This class implements powerful and fully optimized versions, both
+ * sequential and parallel, of the Dual-Pivot Quicksort algorithm by
+ * Vladimir Yaroslavskiy, Jon Bentley and Josh Bloch. This algorithm
+ * offers O(n log(n)) performance on all data sets, and is typically
  * faster than traditional (one-pivot) Quicksort implementations.
  *
- * All exposed methods are package-private, designed to be invoked
- * from public methods (in class Arrays) after performing any
- * necessary array bounds checks and expanding parameters into the
- * required forms.
+ * There are also additional algorithms, invoked from the Dual-Pivot
+ * Quicksort, such as mixed insertion sort, merging of runs, heap
+ * sort, counting sort and parallel merge sort.
  *
  * @author Vladimir Yaroslavskiy
  * @author Jon Bentley
  * @author Josh Bloch
+ * @author Doug Lea
  *
- * @version 2011.02.11 m765.827.12i:5\7pm
- * @since 1.7
+ * @version 2018.08.18
+ *
+ * @since 1.7
  */
 final class DualPivotQuicksort {
 
@@ -51,3131 +55,4107 @@
      */
     private DualPivotQuicksort() {}
 
-    /*
-     * Tuning parameters.
+    /**
+     * Max array size to use mixed insertion sort.
+     */
+    private static final int MAX_MIXED_INSERTION_SORT_SIZE = 65;
+
+    /**
+     * Max array size to use insertion sort.
      */
+    private static final int MAX_INSERTION_SORT_SIZE = 44;
+
+    /**
+     * Min array size to perform sorting in parallel.
+     */
+    private static final int MIN_PARALLEL_SORT_SIZE = 4 << 10;
+
+    /**
+     * Min array size to try merging of runs.
+     */
+    private static final int MIN_TRY_MERGE_SIZE = 4 << 10;
 
     /**
-     * The maximum number of runs in merge sort.
+     * Min size of the first run to continue with scanning.
      */
-    private static final int MAX_RUN_COUNT = 67;
+    private static final int MIN_FIRST_RUN_SIZE = 16;
+
+    /**
+     * Min factor for the first runs to continue scanning.
+     */
+    private static final int MIN_FIRST_RUNS_FACTOR = 7;
 
     /**
-     * If the length of an array to be sorted is less than this
-     * constant, Quicksort is used in preference to merge sort.
+     * Max capacity of the index array for tracking runs.
+     */
+    private static final int MAX_RUN_CAPACITY = 5 << 10;
+
+    /**
+     * Min number of runs, required by parallel merging.
      */
-    private static final int QUICKSORT_THRESHOLD = 286;
+    private static final int MIN_RUN_COUNT = 4;
+
+    /**
+     * Min array size to use parallel merging of parts.
+     */
+    private static final int MIN_PARALLEL_MERGE_PARTS_SIZE = 4 << 10;
 
     /**
-     * If the length of an array to be sorted is less than this
-     * constant, insertion sort is used in preference to Quicksort.
+     * Min size of a byte array to use counting sort.
      */
-    private static final int INSERTION_SORT_THRESHOLD = 47;
+    private static final int MIN_BYTE_COUNTING_SORT_SIZE = 64;
 
     /**
-     * If the length of a byte array to be sorted is greater than this
-     * constant, counting sort is used in preference to insertion sort.
+     * Min size of a short or char array to use counting sort.
+     */
+    private static final int MIN_SHORT_OR_CHAR_COUNTING_SORT_SIZE = 1750;
+
+    /**
+     * Threshold of mixed insertion sort is incremented by this value.
      */
-    private static final int COUNTING_SORT_THRESHOLD_FOR_BYTE = 29;
+    private static final int DELTA = 3 << 1;
+
+    /**
+     * Max recursive partitioning depth before using heap sort.
+     */
+    private static final int MAX_RECURSION_DEPTH = 64 * DELTA;
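
The shifted constants above are compact spellings of plain numbers; evaluated (ThresholdValues is a hypothetical harness):

    public class ThresholdValues {
        public static void main(String[] args) {
            System.out.println(4 << 10); // 4096: MIN_PARALLEL_SORT_SIZE, MIN_TRY_MERGE_SIZE,
                                         //       MIN_PARALLEL_MERGE_PARTS_SIZE
            System.out.println(5 << 10); // 5120: MAX_RUN_CAPACITY
            System.out.println(3 << 1);  // 6:    DELTA
            System.out.println(64 * 6);  // 384:  MAX_RECURSION_DEPTH
        }
    }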
 
     /**
-     * If the length of a short or char array to be sorted is greater
-     * than this constant, counting sort is used in preference to Quicksort.
+     * Calculates the double depth of parallel merging.
+     * The depth is negative if tasks split before sorting.
+     *
+     * @param parallelism the parallelism level
+     * @param size the target size
+     * @return the depth of parallel merging
      */
-    private static final int COUNTING_SORT_THRESHOLD_FOR_SHORT_OR_CHAR = 3200;
-
-    /*
-     * Sorting methods for seven primitive types.
-     */
+    private static int getDepth(int parallelism, int size) {
+        int depth = 0;
+
+        while ((parallelism >>= 3) > 0 && (size >>= 2) > 0) {
+            depth -= 2;
+        }
+        return depth;
+    }
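
getDepth walks parallelism down by factors of 8 and size by factors of 4, subtracting 2 per step, consistent with the four-way parallel merge described below. A quick check of the values it produces (DepthDemo is a hypothetical harness):

    public class DepthDemo {
        // Copy of getDepth from the hunk above.
        static int getDepth(int parallelism, int size) {
            int depth = 0;
            while ((parallelism >>= 3) > 0 && (size >>= 2) > 0) {
                depth -= 2;
            }
            return depth;
        }

        public static void main(String[] args) {
            // The caller below passes size >> 12 as the second argument,
            // so 16 workers on 2^22 elements evaluate getDepth(16, 1024).
            System.out.println(getDepth(16, 1024));  // -2
            System.out.println(getDepth(64, 16384)); // -4
        }
    }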
 
     /**
-     * Sorts the specified range of the array using the given
-     * workspace array slice if possible for merging
+     * Sorts the specified range of the array using parallel merge
+     * sort and/or Dual-Pivot Quicksort.
+     *
+     * To balance the faster splitting and parallelism of merge sort
+     * with the faster element partitioning of Quicksort, ranges are
+     * subdivided into tiers such that, if there is enough parallelism,
+     * the four-way parallel merge is started, still ensuring enough
+     * parallelism to process the partitions.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param parallelism the parallelism level
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    static void sort(int[] a, int left, int right,
-                     int[] work, int workBase, int workLen) {
-        // Use Quicksort on small arrays
-        if (right - left < QUICKSORT_THRESHOLD) {
-            sort(a, left, right, true);
-            return;
+    static void sort(int[] a, int parallelism, int low, int high) {
+        int size = high - low;
+
+        if (parallelism > 1 && size > MIN_PARALLEL_SORT_SIZE) {
+            int depth = getDepth(parallelism, size >> 12);
+            int[] b = depth == 0 ? null : new int[size];
+            new Sorter(null, a, b, low, size, low, depth).invoke();
+        } else {
+            sort(null, a, 0, low, high);
         }
-
-        /*
-         * Index run[i] is the start of i-th run
-         * (ascending or descending sequence).
-         */
-        int[] run = new int[MAX_RUN_COUNT + 1];
-        int count = 0; run[0] = left;
-
-        // Check if the array is nearly sorted
-        for (int k = left; k < right; run[count] = k) {
-            // Equal items in the beginning of the sequence
-            while (k < right && a[k] == a[k + 1])
-                k++;
-            if (k == right) break;  // Sequence finishes with equal items
-            if (a[k] < a[k + 1]) { // ascending
-                while (++k <= right && a[k - 1] <= a[k]);
-            } else if (a[k] > a[k + 1]) { // descending
-                while (++k <= right && a[k - 1] >= a[k]);
-                // Transform into an ascending sequence
-                for (int lo = run[count] - 1, hi = k; ++lo < --hi; ) {
-                    int t = a[lo]; a[lo] = a[hi]; a[hi] = t;
-                }
+    }
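
The parallel path is thus taken only with real parallelism and more than MIN_PARALLEL_SORT_SIZE (4096) elements; anything else falls through to the sequential entry point. A hypothetical predicate making the dispatch rule explicit:

    public class DispatchDemo {
        static final int MIN_PARALLEL_SORT_SIZE = 4 << 10; // 4096, as above

        // Mirrors the test at the top of sort(int[], parallelism, low, high).
        static boolean sortsInParallel(int parallelism, int low, int high) {
            return parallelism > 1 && (high - low) > MIN_PARALLEL_SORT_SIZE;
        }

        public static void main(String[] args) {
            System.out.println(sortsInParallel(8, 0, 10_000)); // true
            System.out.println(sortsInParallel(8, 0, 1_000));  // false: too small
            System.out.println(sortsInParallel(1, 0, 10_000)); // false: single worker
        }
    }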
+
+    /**
+     * Sorts the specified array using the Dual-Pivot Quicksort and/or
+     * other sorts in special cases, possibly with parallel partitions.
+     *
+     * @param sorter parallel context
+     * @param a the array to be sorted
+     * @param bits the combination of recursion depth and bit flag, where
+     *        the rightmost bit "0" indicates that the array is the leftmost part
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(Sorter sorter, int[] a, int bits, int low, int high) {
+        while (true) {
+            int end = high - 1, size = high - low;
+
+            /*
+             * Run mixed insertion sort on small non-leftmost parts.
+             */
+            if (size < MAX_MIXED_INSERTION_SORT_SIZE + bits && (bits & 1) > 0) {
+                mixedInsertionSort(a, low, high - 3 * ((size >> 5) << 3), high);
+                return;
             }
 
-            // Merge a transformed descending sequence followed by an
-            // ascending sequence
-            if (run[count] > left && a[run[count]] >= a[run[count] - 1]) {
-                count--;
+            /*
+             * Invoke insertion sort on small leftmost part.
+             */
+            if (size < MAX_INSERTION_SORT_SIZE) {
+                insertionSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Check if the whole array or large non-leftmost
+             * parts are nearly sorted and then merge runs.
+             */
+            if ((bits == 0 || size > MIN_TRY_MERGE_SIZE && (bits & 1) > 0)
+                    && tryMergeRuns(sorter, a, low, size)) {
+                return;
+            }
+
+            /*
+             * Switch to heap sort if execution
+             * time is becoming quadratic.
+             */
+            if ((bits += DELTA) > MAX_RECURSION_DEPTH) {
+                heapSort(a, low, high);
+                return;
             }
 
             /*
-             * The array is not highly structured,
-             * use Quicksort instead of merge sort.
+             * Use an inexpensive approximation of the golden ratio
+             * to select five sample elements and determine pivots.
+             */
+            int step = (size >> 3) * 3 + 3;
+
+            /*
+             * Five elements around (and including) the central element
+             * will be used for pivot selection as described below. The
+             * unequal choice of spacing these elements was empirically
+             * determined to work well on a wide variety of inputs.
              */
-            if (++count == MAX_RUN_COUNT) {
-                sort(a, left, right, true);
-                return;
+            int e1 = low + step;
+            int e5 = end - step;
+            int e3 = (e1 + e5) >>> 1;
+            int e2 = (e1 + e3) >>> 1;
+            int e4 = (e3 + e5) >>> 1;
+            int a3 = a[e3];
+
+            /*
+             * Sort these elements in place by the combination
+             * of a 4-element sorting network and insertion sort.
+             *
+             *    5 ------o-----------o------------
+             *            |           |
+             *    4 ------|-----o-----o-----o------
+             *            |     |           |
+             *    2 ------o-----|-----o-----o------
+             *                  |     |
+             *    1 ------------o-----o------------
+             */
+            if (a[e5] < a[e2]) { int t = a[e5]; a[e5] = a[e2]; a[e2] = t; }
+            if (a[e4] < a[e1]) { int t = a[e4]; a[e4] = a[e1]; a[e1] = t; }
+            if (a[e5] < a[e4]) { int t = a[e5]; a[e5] = a[e4]; a[e4] = t; }
+            if (a[e2] < a[e1]) { int t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
+            if (a[e4] < a[e2]) { int t = a[e4]; a[e4] = a[e2]; a[e2] = t; }
+
+            if (a3 < a[e2]) {
+                if (a3 < a[e1]) {
+                    a[e3] = a[e2]; a[e2] = a[e1]; a[e1] = a3;
+                } else {
+                    a[e3] = a[e2]; a[e2] = a3;
+                }
+            } else if (a3 > a[e4]) {
+                if (a3 > a[e5]) {
+                    a[e3] = a[e4]; a[e4] = a[e5]; a[e5] = a3;
+                } else {
+                    a[e3] = a[e4]; a[e4] = a3;
+                }
             }
-        }
-
-        // These invariants should hold true:
-        //    run[0] = 0
-        //    run[<last>] = right + 1; (terminator)
-
-        if (count == 0) {
-            // A single equal run
-            return;
-        } else if (count == 1 && run[count] > right) {
-            // Either a single ascending or a transformed descending run.
-            // Always check that a final run is a proper terminator, otherwise
-            // we have an unterminated trailing run, to handle downstream.
-            return;
-        }
-        right++;
-        if (run[count] < right) {
-            // Corner case: the final run is not a terminator. This may happen
-            // if a final run is an equals run, or there is a single-element run
-            // at the end. Fix up by adding a proper terminator at the end.
-            // Note that we terminate with (right + 1), incremented earlier.
-            run[++count] = right;
-        }
-
-        // Determine alternation base for merge
-        byte odd = 0;
-        for (int n = 1; (n <<= 1) < count; odd ^= 1);
-
-        // Use or create temporary array b for merging
-        int[] b;                 // temp array; alternates with a
-        int ao, bo;              // array offsets from 'left'
-        int blen = right - left; // space needed for b
-        if (work == null || workLen < blen || workBase + blen > work.length) {
-            work = new int[blen];
-            workBase = 0;
-        }
-        if (odd == 0) {
-            System.arraycopy(a, left, work, workBase, blen);
-            b = a;
-            bo = 0;
-            a = work;
-            ao = workBase - left;
-        } else {
-            b = work;
-            ao = 0;
-            bo = workBase - left;
-        }
-
-        // Merging
-        for (int last; count > 1; count = last) {
-            for (int k = (last = 0) + 2; k <= count; k += 2) {
-                int hi = run[k], mi = run[k - 1];
-                for (int i = run[k - 2], p = i, q = mi; i < hi; ++i) {
-                    if (q >= hi || p < mi && a[p + ao] <= a[q + ao]) {
-                        b[i + bo] = a[p++ + ao];
-                    } else {
-                        b[i + bo] = a[q++ + ao];
+
+            // Pointers
+            int lower = low; // The index of the last element of the left part
+            int upper = end; // The index of the first element of the right part
+
+            /*
+             * Partitioning with 2 pivots in case of different elements.
+             */
+            if (a[e1] < a[e2] && a[e2] < a[e3] && a[e3] < a[e4] && a[e4] < a[e5]) {
+
+                /*
+                 * Use the first and fifth of the five sorted elements as
+                 * the pivots. These values are inexpensive approximations
+                 * of terciles. Note that pivot1 < pivot2.
+                 */
+                int pivot1 = a[e1];
+                int pivot2 = a[e5];
+
+                /*
+                 * The first and the last elements to be sorted are moved
+                 * to the locations formerly occupied by the pivots. When
+                 * partitioning is completed, the pivots are swapped back
+                 * into their final positions, and excluded from
+                 * subsequent sorting.
+                 */
+                a[e1] = a[lower];
+                a[e5] = a[upper];
+
+                /*
+                 * Skip elements that are less than pivot1 or greater than pivot2.
+                 */
+                while (a[++lower] < pivot1);
+                while (a[--upper] > pivot2);
+
+                /*
+                 * Backward 3-interval partitioning
+                 *
+                 *   left part                 central part          right part
+                 * +------------------------------------------------------------+
+                 * |  < pivot1  |   ?   |  pivot1 <= && <= pivot2  |  > pivot2  |
+                 * +------------------------------------------------------------+
+                 *             ^       ^                            ^
+                 *             |       |                            |
+                 *           lower     k                          upper
+                 *
+                 * Invariants:
+                 *
+                 *              all in (low, lower] < pivot1
+                 *    pivot1 <= all in (k, upper)  <= pivot2
+                 *              all in [upper, end) > pivot2
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int unused = --lower, k = ++upper; --k > lower; ) {
+                    int ak = a[k];
+
+                    if (ak < pivot1) { // Move a[k] to the left side
+                        while (lower < k) {
+                            if (a[++lower] >= pivot1) {
+                                if (a[lower] > pivot2) {
+                                    a[k] = a[--upper];
+                                    a[upper] = a[lower];
+                                } else {
+                                    a[k] = a[lower];
+                                }
+                                a[lower] = ak;
+                                break;
+                            }
+                        }
+                    } else if (ak > pivot2) { // Move a[k] to the right side
+                        a[k] = a[--upper];
+                        a[upper] = ak;
                     }
                 }
-                run[++last] = hi;
+
+                /*
+                 * Swap the pivots into their final positions.
+                 */
+                a[low] = a[lower]; a[lower] = pivot1;
+                a[end] = a[upper]; a[upper] = pivot2;
+
+                /*
+                 * Sort non-left parts recursively (possibly in parallel),
+                 * excluding known pivots.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, lower + 1, upper);
+                    sorter.forkSorter(bits | 1, upper + 1, high);
+                } else {
+                    sort(sorter, a, bits | 1, lower + 1, upper);
+                    sort(sorter, a, bits | 1, upper + 1, high);
+                }
+
+            } else { // Use single pivot in case of many equal elements
+
+                /*
+                 * Use the third of the five sorted elements as the pivot.
+                 * This value is an inexpensive approximation of the median.
+                 */
+                int pivot = a[e3];
+
+                /*
+                 * The first element to be sorted is moved to the
+                 * location formerly occupied by the pivot. When
+                 * partitioning is completed, the pivot is swapped
+                 * back into its final position, and excluded from
+                 * subsequent sorting.
+                 */
+                a[e3] = a[lower];
+
+                /*
+                 * Traditional 3-way (Dutch National Flag) partitioning
+                 *
+                 *   left part                 central part    right part
+                 * +------------------------------------------------------+
+                 * |   < pivot   |     ?     |   == pivot   |   > pivot   |
+                 * +------------------------------------------------------+
+                 *              ^           ^                ^
+                 *              |           |                |
+                 *            lower         k              upper
+                 *
+                 * Invariants:
+                 *
+                 *   all in (low, lower] < pivot
+                 *   all in (k, upper)  == pivot
+                 *   all in [upper, end] > pivot
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int k = ++upper; --k > lower; ) {
+                    int ak = a[k];
+
+                    if (ak != pivot) {
+                        a[k] = pivot;
+
+                        if (ak < pivot) { // Move a[k] to the left side
+                            while (a[++lower] < pivot);
+
+                            if (a[lower] > pivot) {
+                                a[--upper] = a[lower];
+                            }
+                            a[lower] = ak;
+                        } else { // ak > pivot - Move a[k] to the right side
+                            a[--upper] = ak;
+                        }
+                    }
+                }
+
+                /*
+                 * Swap the pivot into its final position.
+                 */
+                a[low] = a[lower]; a[lower] = pivot;
+
+                /*
+                 * Sort the right part (possibly in parallel), excluding
+                 * known pivot. All elements from the central part are
+                 * equal and therefore already sorted.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, upper, high);
+                } else {
+                    sort(sorter, a, bits | 1, upper, high);
+                }
             }
-            if ((count & 1) != 0) {
-                for (int i = right, lo = run[count - 1]; --i >= lo;
-                    b[i + bo] = a[i + ao]
-                );
-                run[++last] = right;
-            }
-            int[] t = a; a = b; b = t;
-            int o = ao; ao = bo; bo = o;
+            high = lower; // Iterate along the left part
         }
     }
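
For reference, the 3-interval invariant used by the dual-pivot branch above can be reproduced with a simple forward pass. A minimal, self-contained sketch (hypothetical class and method names, not part of this changeset; the code above achieves the same invariant with a backward in-place scan):

    import java.util.Arrays;

    // Hypothetical demo, not part of this changeset: one round of
    // two-pivot, 3-interval partitioning, written as a forward pass.
    public class TwoPivotPartitionDemo {

        // Rearranges a into [ < p1 | p1 <= x <= p2 | > p2 ] and
        // returns the two boundary indexes {lt, gt}.
        static int[] partition(int[] a, int p1, int p2) {
            int lt = 0, k = 0, gt = a.length - 1;

            while (k <= gt) {
                if (a[k] < p1) {        // Move a[k] to the left part
                    int t = a[k]; a[k++] = a[lt]; a[lt++] = t;
                } else if (a[k] > p2) { // Move a[k] to the right part
                    int t = a[k]; a[k] = a[gt]; a[gt--] = t;
                } else {                // Keep a[k] in the central part
                    k++;
                }
            }
            return new int[] { lt, gt };
        }

        public static void main(String[] args) {
            int[] a = { 5, 9, 1, 7, 3, 8, 2, 6, 4 };
            int[] bounds = partition(a, 3, 6); // pivots 3 and 6
            System.out.println(Arrays.toString(a)
                + "  boundaries: " + Arrays.toString(bounds));
        }
    }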
 
     /**
-     * Sorts the specified range of the array by Dual-Pivot Quicksort.
+     * Sorts the specified range of the array using mixed insertion sort.
+     *
+     * Mixed insertion sort is a combination of simple insertion sort,
+     * pin insertion sort and pair insertion sort.
+     *
+     * In the context of Dual-Pivot Quicksort, the pivot element
+     * from the left part plays the role of a sentinel, because it
+     * is less than any element from the given part. Therefore, the
+     * expensive check of the left range can be skipped on each
+     * iteration unless it is the leftmost call.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param leftmost indicates if this part is the leftmost in the range
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param end the index of the last element for simple insertion sort
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void sort(int[] a, int left, int right, boolean leftmost) {
-        int length = right - left + 1;
-
-        // Use insertion sort on tiny arrays
-        if (length < INSERTION_SORT_THRESHOLD) {
-            if (leftmost) {
-                /*
-                 * Traditional (without sentinel) insertion sort,
-                 * optimized for server VM, is used in case of
-                 * the leftmost part.
-                 */
-                for (int i = left, j = i; i < right; j = ++i) {
-                    int ai = a[i + 1];
-                    while (ai < a[j]) {
-                        a[j + 1] = a[j];
-                        if (j-- == left) {
-                            break;
-                        }
-                    }
-                    a[j + 1] = ai;
-                }
-            } else {
-                /*
-                 * Skip the longest ascending sequence.
-                 */
-                do {
-                    if (left >= right) {
-                        return;
-                    }
-                } while (a[++left] >= a[left - 1]);
-
-                /*
-                 * Every element from adjoining part plays the role
-                 * of sentinel, therefore this allows us to avoid the
-                 * left range check on each iteration. Moreover, we use
-                 * the more optimized algorithm, so called pair insertion
-                 * sort, which is faster (in the context of Quicksort)
-                 * than traditional implementation of insertion sort.
-                 */
-                for (int k = left; ++left <= right; k = ++left) {
-                    int a1 = a[k], a2 = a[left];
-
-                    if (a1 < a2) {
-                        a2 = a1; a1 = a[left];
-                    }
-                    while (a1 < a[--k]) {
-                        a[k + 2] = a[k];
-                    }
-                    a[++k + 1] = a1;
-
-                    while (a2 < a[--k]) {
-                        a[k + 1] = a[k];
-                    }
-                    a[k + 1] = a2;
-                }
-                int last = a[right];
-
-                while (last < a[--right]) {
-                    a[right + 1] = a[right];
-                }
-                a[right + 1] = last;
-            }
-            return;
-        }
-
-        // Inexpensive approximation of length / 7
-        int seventh = (length >> 3) + (length >> 6) + 1;
-
-        /*
-         * Sort five evenly spaced elements around (and including) the
-         * center element in the range. These elements will be used for
-         * pivot selection as described below. The choice for spacing
-         * these elements was empirically determined to work well on
-         * a wide variety of inputs.
-         */
-        int e3 = (left + right) >>> 1; // The midpoint
-        int e2 = e3 - seventh;
-        int e1 = e2 - seventh;
-        int e4 = e3 + seventh;
-        int e5 = e4 + seventh;
-
-        // Sort these elements using insertion sort
-        if (a[e2] < a[e1]) { int t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
-
-        if (a[e3] < a[e2]) { int t = a[e3]; a[e3] = a[e2]; a[e2] = t;
-            if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-        }
-        if (a[e4] < a[e3]) { int t = a[e4]; a[e4] = a[e3]; a[e3] = t;
-            if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-            }
-        }
-        if (a[e5] < a[e4]) { int t = a[e5]; a[e5] = a[e4]; a[e4] = t;
-            if (t < a[e3]) { a[e4] = a[e3]; a[e3] = t;
-                if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                    if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-                }
-            }
-        }
-
-        // Pointers
-        int less  = left;  // The index of the first element of center part
-        int great = right; // The index before the first element of right part
-
-        if (a[e1] != a[e2] && a[e2] != a[e3] && a[e3] != a[e4] && a[e4] != a[e5]) {
-            /*
-             * Use the second and fourth of the five sorted elements as pivots.
-             * These values are inexpensive approximations of the first and
-             * second terciles of the array. Note that pivot1 <= pivot2.
-             */
-            int pivot1 = a[e2];
-            int pivot2 = a[e4];
+    private static void mixedInsertionSort(int[] a, int low, int end, int high) {
+        if (end == high) {
 
             /*
-             * The first and the last elements to be sorted are moved to the
-             * locations formerly occupied by the pivots. When partitioning
-             * is complete, the pivots are swapped back into their final
-             * positions, and excluded from subsequent sorting.
+             * Invoke simple insertion sort on a tiny array.
              */
-            a[e2] = a[left];
-            a[e4] = a[right];
-
-            /*
-             * Skip elements, which are less or greater than pivot values.
-             */
-            while (a[++less] < pivot1);
-            while (a[--great] > pivot2);
+            for (int i; ++low < end; ) {
+                int ai = a[i = low];
+
+                while (ai < a[--i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        } else {
 
             /*
-             * Partitioning:
-             *
-             *   left part           center part                   right part
-             * +--------------------------------------------------------------+
-             * |  < pivot1  |  pivot1 <= && <= pivot2  |    ?    |  > pivot2  |
-             * +--------------------------------------------------------------+
-             *               ^                          ^       ^
-             *               |                          |       |
-             *              less                        k     great
-             *
-             * Invariants:
-             *
-             *              all in (left, less)   < pivot1
-             *    pivot1 <= all in [less, k)     <= pivot2
-             *              all in (great, right) > pivot2
+             * Start with pin insertion sort on the small part.
              *
-             * Pointer k is the first index of ?-part.
-             */
-            outer:
-            for (int k = less - 1; ++k <= great; ) {
-                int ak = a[k];
-                if (ak < pivot1) { // Move a[k] to left part
-                    a[k] = a[less];
-                    /*
-                     * Here and below we use "a[i] = b; i++;" instead
-                     * of "a[i++] = b;" due to performance issue.
-                     */
-                    a[less] = ak;
-                    ++less;
-                } else if (ak > pivot2) { // Move a[k] to right part
-                    while (a[great] > pivot2) {
-                        if (great-- == k) {
-                            break outer;
-                        }
-                    }
-                    if (a[great] < pivot1) { // a[great] <= pivot2
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // pivot1 <= a[great] <= pivot2
-                        a[k] = a[great];
-                    }
-                    /*
-                     * Here and below we use "a[i] = b; i--;" instead
-                     * of "a[i--] = b;" due to performance issue.
-                     */
-                    a[great] = ak;
-                    --great;
-                }
-            }
-
-            // Swap pivots into their final positions
-            a[left]  = a[less  - 1]; a[less  - 1] = pivot1;
-            a[right] = a[great + 1]; a[great + 1] = pivot2;
-
-            // Sort left and right parts recursively, excluding known pivots
-            sort(a, left, less - 2, leftmost);
-            sort(a, great + 2, right, false);
-
-            /*
-             * If center part is too large (comprises > 4/7 of the array),
-             * swap internal pivot values to ends.
+             * Pin insertion sort is an extension of simple insertion
+             * sort. The main idea of this sort is to put elements
+             * larger than an element called the pin to the end of the
+             * array (the proper area for such elements). This avoids
+             * expensive movements of these elements through the whole
+             * array.
              */
-            if (less < e1 && e5 < great) {
-                /*
-                 * Skip elements, which are equal to pivot values.
-                 */
-                while (a[less] == pivot1) {
-                    ++less;
-                }
-
-                while (a[great] == pivot2) {
-                    --great;
-                }
-
-                /*
-                 * Partitioning:
-                 *
-                 *   left part         center part                  right part
-                 * +----------------------------------------------------------+
-                 * | == pivot1 |  pivot1 < && < pivot2  |    ?    | == pivot2 |
-                 * +----------------------------------------------------------+
-                 *              ^                        ^       ^
-                 *              |                        |       |
-                 *             less                      k     great
-                 *
-                 * Invariants:
-                 *
-                 *              all in (*,  less) == pivot1
-                 *     pivot1 < all in [less,  k)  < pivot2
-                 *              all in (great, *) == pivot2
-                 *
-                 * Pointer k is the first index of ?-part.
-                 */
-                outer:
-                for (int k = less - 1; ++k <= great; ) {
-                    int ak = a[k];
-                    if (ak == pivot1) { // Move a[k] to left part
-                        a[k] = a[less];
-                        a[less] = ak;
-                        ++less;
-                    } else if (ak == pivot2) { // Move a[k] to right part
-                        while (a[great] == pivot2) {
-                            if (great-- == k) {
-                                break outer;
-                            }
-                        }
-                        if (a[great] == pivot1) { // a[great] < pivot2
-                            a[k] = a[less];
-                            /*
-                             * Even though a[great] equals to pivot1, the
-                             * assignment a[less] = pivot1 may be incorrect,
-                             * if a[great] and pivot1 are floating-point zeros
-                             * of different signs. Therefore in float and
-                             * double sorting methods we have to use more
-                             * accurate assignment a[less] = a[great].
-                             */
-                            a[less] = pivot1;
-                            ++less;
-                        } else { // pivot1 < a[great] < pivot2
-                            a[k] = a[great];
-                        }
-                        a[great] = ak;
-                        --great;
+            int pin = a[end];
+
+            for (int i, p = high; ++low < end; ) {
+                int ai = a[i = low];
+
+                if (ai < a[i - 1]) { // Small element
+
+                    /*
+                     * Insert the small element into the sorted part.
+                     */
+                    a[i] = a[--i];
+
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
                     }
-                }
-            }
-
-            // Sort center part recursively
-            sort(a, less, great, false);
-
-        } else { // Partitioning with one pivot
-            /*
-             * Use the third of the five sorted elements as pivot.
-             * This value is inexpensive approximation of the median.
-             */
-            int pivot = a[e3];
-
-            /*
-             * Partitioning degenerates to the traditional 3-way
-             * (or "Dutch National Flag") schema:
-             *
-             *   left part    center part              right part
-             * +-------------------------------------------------+
-             * |  < pivot  |   == pivot   |     ?    |  > pivot  |
-             * +-------------------------------------------------+
-             *              ^              ^        ^
-             *              |              |        |
-             *             less            k      great
-             *
-             * Invariants:
-             *
-             *   all in (left, less)   < pivot
-             *   all in [less, k)     == pivot
-             *   all in (great, right) > pivot
-             *
-             * Pointer k is the first index of ?-part.
-             */
-            for (int k = less; k <= great; ++k) {
-                if (a[k] == pivot) {
-                    continue;
-                }
-                int ak = a[k];
-                if (ak < pivot) { // Move a[k] to left part
-                    a[k] = a[less];
-                    a[less] = ak;
-                    ++less;
-                } else { // a[k] > pivot - Move a[k] to right part
-                    while (a[great] > pivot) {
-                        --great;
+                    a[i + 1] = ai;
+
+                } else if (p > i && ai > pin) { // Large element
+
+                    /*
+                     * Find an element smaller than the pin.
+                     */
+                    while (a[--p] > pin);
+
+                    /*
+                     * Swap it with the large element.
+                     */
+                    if (p > i) {
+                        ai = a[p];
+                        a[p] = a[i];
                     }
-                    if (a[great] < pivot) { // a[great] <= pivot
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // a[great] == pivot
-                        /*
-                         * Even though a[great] equals to pivot, the
-                         * assignment a[k] = pivot may be incorrect,
-                         * if a[great] and pivot are floating-point
-                         * zeros of different signs. Therefore in float
-                         * and double sorting methods we have to use
-                         * more accurate assignment a[k] = a[great].
-                         */
-                        a[k] = pivot;
+
+                    /*
+                     * Insert the small element into the sorted part.
+                     */
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
                     }
-                    a[great] = ak;
-                    --great;
+                    a[i + 1] = ai;
                 }
             }
 
             /*
-             * Sort left and right parts recursively.
-             * All elements from center part are equal
-             * and, therefore, already sorted.
+             * Continue with pair insertion sort on the remaining part.
              */
-            sort(a, left, less - 1, leftmost);
-            sort(a, great + 1, right, false);
+            for (int i; low < high; ++low) {
+                int a1 = a[i = low], a2 = a[++low];
+
+                /*
+                 * Insert two elements per iteration: first, insert the
+                 * larger element, then insert the smaller element starting
+                 * from the position where the larger element was inserted.
+                 */
+                if (a1 > a2) {
+
+                    while (a1 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a1;
+
+                    while (a2 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a2;
+
+                } else if (a1 < a[i - 1]) {
+
+                    while (a2 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a2;
+
+                    while (a1 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a1;
+                }
+            }
+        }
+    }
+
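
A standalone version of the pair insertion loop above must guard the left boundary explicitly, because outside of Quicksort no pivot acts as a sentinel. A minimal sketch under that assumption (hypothetical names, not part of this changeset):

    import java.util.Arrays;

    // Hypothetical demo of pair insertion sort with explicit boundary
    // checks (the method above relies on a sentinel instead).
    public class PairInsertionSortDemo {

        static void pairInsertionSort(int[] a, int low, int high) { // high exclusive
            int k;
            for (k = low + 1; k < high; k += 2) {
                int a1 = a[k - 1], a2 = a[k];

                if (a1 < a2) { // Make a1 the larger of the pair
                    int t = a1; a1 = a2; a2 = t;
                }

                // Insert the larger element first ...
                int i = k - 2;
                while (i >= low && a1 < a[i]) {
                    a[i + 2] = a[i--];
                }
                a[i + 2] = a1;

                // ... then the smaller one, starting where a1 landed.
                while (i >= low && a2 < a[i]) {
                    a[i + 1] = a[i--];
                }
                a[i + 1] = a2;
            }
            if (k == high) { // Odd number of elements: insert the last one
                int last = a[high - 1];
                int i = high - 2;

                while (i >= low && last < a[i]) {
                    a[i + 1] = a[i--];
                }
                a[i + 1] = last;
            }
        }

        public static void main(String[] args) {
            int[] a = { 7, 2, 9, 4, 1 };
            pairInsertionSort(a, 0, a.length);
            System.out.println(Arrays.toString(a)); // [1, 2, 4, 7, 9]
        }
    }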
+    /**
+     * Sorts the specified range of the array using insertion sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void insertionSort(int[] a, int low, int high) {
+        for (int i, k = low; ++k < high; ) {
+            int ai = a[i = k];
+
+            if (ai < a[i - 1]) {
+                while (--i >= low && ai < a[i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        }
+    }
+
+    /**
+     * Sorts the specified range of the array using heap sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void heapSort(int[] a, int low, int high) {
+        for (int k = (low + high) >>> 1; k > low; ) {
+            pushDown(a, --k, a[k], low, high);
+        }
+        while (--high > low) {
+            int max = a[low];
+            pushDown(a, low, a[high], low, high);
+            a[high] = max;
         }
     }
 
     /**
-     * Sorts the specified range of the array using the given
-     * workspace array slice if possible for merging
+     * Pushes the specified element down during heap sort.
      *
-     * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param a the given array
+     * @param p the start index
+     * @param value the given element
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    static void sort(long[] a, int left, int right,
-                     long[] work, int workBase, int workLen) {
-        // Use Quicksort on small arrays
-        if (right - left < QUICKSORT_THRESHOLD) {
-            sort(a, left, right, true);
-            return;
-        }
-
-        /*
-         * Index run[i] is the start of i-th run
-         * (ascending or descending sequence).
-         */
-        int[] run = new int[MAX_RUN_COUNT + 1];
-        int count = 0; run[0] = left;
-
-        // Check if the array is nearly sorted
-        for (int k = left; k < right; run[count] = k) {
-            // Equal items in the beginning of the sequence
-            while (k < right && a[k] == a[k + 1])
-                k++;
-            if (k == right) break;  // Sequence finishes with equal items
-            if (a[k] < a[k + 1]) { // ascending
-                while (++k <= right && a[k - 1] <= a[k]);
-            } else if (a[k] > a[k + 1]) { // descending
-                while (++k <= right && a[k - 1] >= a[k]);
-                // Transform into an ascending sequence
-                for (int lo = run[count] - 1, hi = k; ++lo < --hi; ) {
-                    long t = a[lo]; a[lo] = a[hi]; a[hi] = t;
-                }
+    private static void pushDown(int[] a, int p, int value, int low, int high) {
+        for (int k ;; a[p] = a[p = k]) {
+            k = (p << 1) - low + 2; // Index of the right child
+
+            if (k > high) {
+                break;
             }
-
-            // Merge a transformed descending sequence followed by an
-            // ascending sequence
-            if (run[count] > left && a[run[count]] >= a[run[count] - 1]) {
-                count--;
+            if (k == high || a[k] < a[k - 1]) {
+                --k;
             }
-
-            /*
-             * The array is not highly structured,
-             * use Quicksort instead of merge sort.
-             */
-            if (++count == MAX_RUN_COUNT) {
-                sort(a, left, right, true);
-                return;
+            if (a[k] <= value) {
+                break;
             }
         }
-
-        // These invariants should hold true:
-        //    run[0] = 0
-        //    run[<last>] = right + 1; (terminator)
-
-        if (count == 0) {
-            // A single equal run
-            return;
-        } else if (count == 1 && run[count] > right) {
-            // Either a single ascending or a transformed descending run.
-            // Always check that a final run is a proper terminator, otherwise
-            // we have an unterminated trailing run, to handle downstream.
-            return;
-        }
-        right++;
-        if (run[count] < right) {
-            // Corner case: the final run is not a terminator. This may happen
-            // if a final run is an equals run, or there is a single-element run
-            // at the end. Fix up by adding a proper terminator at the end.
-            // Note that we terminate with (right + 1), incremented earlier.
-            run[++count] = right;
-        }
-
-        // Determine alternation base for merge
-        byte odd = 0;
-        for (int n = 1; (n <<= 1) < count; odd ^= 1);
-
-        // Use or create temporary array b for merging
-        long[] b;                 // temp array; alternates with a
-        int ao, bo;              // array offsets from 'left'
-        int blen = right - left; // space needed for b
-        if (work == null || workLen < blen || workBase + blen > work.length) {
-            work = new long[blen];
-            workBase = 0;
-        }
-        if (odd == 0) {
-            System.arraycopy(a, left, work, workBase, blen);
-            b = a;
-            bo = 0;
-            a = work;
-            ao = workBase - left;
-        } else {
-            b = work;
-            ao = 0;
-            bo = workBase - left;
-        }
-
-        // Merging
-        for (int last; count > 1; count = last) {
-            for (int k = (last = 0) + 2; k <= count; k += 2) {
-                int hi = run[k], mi = run[k - 1];
-                for (int i = run[k - 2], p = i, q = mi; i < hi; ++i) {
-                    if (q >= hi || p < mi && a[p + ao] <= a[q + ao]) {
-                        b[i + bo] = a[p++ + ao];
-                    } else {
-                        b[i + bo] = a[q++ + ao];
-                    }
-                }
-                run[++last] = hi;
-            }
-            if ((count & 1) != 0) {
-                for (int i = right, lo = run[count - 1]; --i >= lo;
-                    b[i + bo] = a[i + ao]
-                );
-                run[++last] = right;
-            }
-            long[] t = a; a = b; b = t;
-            int o = ao; ao = bo; bo = o;
-        }
+        a[p] = value;
     }
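
The child indexing above maps a classic binary heap onto the range [low, high): the right child of node p lands at 2 * p - low + 2. Specialized to low == 0, the same loop reduces to the familiar 2 * p + 2, as in this self-contained sketch (hypothetical names, not part of this changeset):

    import java.util.Arrays;

    // Hypothetical demo, not part of this changeset: the same heap
    // sort structure specialized to a whole array, i.e. low == 0.
    public class HeapSortDemo {

        static void heapSort(int[] a) {
            int high = a.length;

            // Build the max-heap bottom-up.
            for (int k = high >>> 1; k > 0; ) {
                pushDown(a, --k, a[k], high);
            }
            // Repeatedly move the maximum to the shrinking tail.
            while (--high > 0) {
                int max = a[0];
                pushDown(a, 0, a[high], high);
                a[high] = max;
            }
        }

        static void pushDown(int[] a, int p, int value, int high) {
            for (int k;; a[p] = a[p = k]) {
                k = (p << 1) + 2; // Index of the right child (low == 0)

                if (k > high) {
                    break; // No children within the heap
                }
                if (k == high || a[k] < a[k - 1]) {
                    --k;   // Right child absent or smaller: take the left child
                }
                if (a[k] <= value) {
                    break; // Heap property restored
                }
            }
            a[p] = value;
        }

        public static void main(String[] args) {
            int[] a = { 4, 1, 7, 3, 9, 2 };
            heapSort(a);
            System.out.println(Arrays.toString(a)); // [1, 2, 3, 4, 7, 9]
        }
    }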
 
     /**
-     * Sorts the specified range of the array by Dual-Pivot Quicksort.
+     * Tries to sort the specified range of the array.
      *
+     * @param sorter parallel context
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param leftmost indicates if this part is the leftmost in the range
+     * @param low the index of the first element to be sorted
+     * @param size the array size
+     * @return true if finally sorted, false otherwise
      */
-    private static void sort(long[] a, int left, int right, boolean leftmost) {
-        int length = right - left + 1;
-
-        // Use insertion sort on tiny arrays
-        if (length < INSERTION_SORT_THRESHOLD) {
-            if (leftmost) {
-                /*
-                 * Traditional (without sentinel) insertion sort,
-                 * optimized for server VM, is used in case of
-                 * the leftmost part.
-                 */
-                for (int i = left, j = i; i < right; j = ++i) {
-                    long ai = a[i + 1];
-                    while (ai < a[j]) {
-                        a[j + 1] = a[j];
-                        if (j-- == left) {
-                            break;
-                        }
-                    }
-                    a[j + 1] = ai;
-                }
-            } else {
-                /*
-                 * Skip the longest ascending sequence.
-                 */
-                do {
-                    if (left >= right) {
-                        return;
-                    }
-                } while (a[++left] >= a[left - 1]);
-
-                /*
-                 * Every element from adjoining part plays the role
-                 * of sentinel, therefore this allows us to avoid the
-                 * left range check on each iteration. Moreover, we use
-                 * the more optimized algorithm, so called pair insertion
-                 * sort, which is faster (in the context of Quicksort)
-                 * than traditional implementation of insertion sort.
-                 */
-                for (int k = left; ++left <= right; k = ++left) {
-                    long a1 = a[k], a2 = a[left];
-
-                    if (a1 < a2) {
-                        a2 = a1; a1 = a[left];
-                    }
-                    while (a1 < a[--k]) {
-                        a[k + 2] = a[k];
-                    }
-                    a[++k + 1] = a1;
-
-                    while (a2 < a[--k]) {
-                        a[k + 1] = a[k];
-                    }
-                    a[k + 1] = a2;
-                }
-                long last = a[right];
-
-                while (last < a[--right]) {
-                    a[right + 1] = a[right];
-                }
-                a[right + 1] = last;
-            }
-            return;
-        }
-
-        // Inexpensive approximation of length / 7
-        int seventh = (length >> 3) + (length >> 6) + 1;
+    private static boolean tryMergeRuns(Sorter sorter, int[] a, int low, int size) {
 
         /*
-         * Sort five evenly spaced elements around (and including) the
-         * center element in the range. These elements will be used for
-         * pivot selection as described below. The choice for spacing
-         * these elements was empirically determined to work well on
-         * a wide variety of inputs.
+         * The run array is constructed only if initial runs are
+         * long enough to continue; run[i] then holds the start index
+         * of the i-th sequence of elements in non-descending order.
          */
-        int e3 = (left + right) >>> 1; // The midpoint
-        int e2 = e3 - seventh;
-        int e1 = e2 - seventh;
-        int e4 = e3 + seventh;
-        int e5 = e4 + seventh;
-
-        // Sort these elements using insertion sort
-        if (a[e2] < a[e1]) { long t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
-
-        if (a[e3] < a[e2]) { long t = a[e3]; a[e3] = a[e2]; a[e2] = t;
-            if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-        }
-        if (a[e4] < a[e3]) { long t = a[e4]; a[e4] = a[e3]; a[e3] = t;
-            if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-            }
-        }
-        if (a[e5] < a[e4]) { long t = a[e5]; a[e5] = a[e4]; a[e4] = t;
-            if (t < a[e3]) { a[e4] = a[e3]; a[e3] = t;
-                if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                    if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-                }
-            }
-        }
-
-        // Pointers
-        int less  = left;  // The index of the first element of center part
-        int great = right; // The index before the first element of right part
-
-        if (a[e1] != a[e2] && a[e2] != a[e3] && a[e3] != a[e4] && a[e4] != a[e5]) {
-            /*
-             * Use the second and fourth of the five sorted elements as pivots.
-             * These values are inexpensive approximations of the first and
-             * second terciles of the array. Note that pivot1 <= pivot2.
-             */
-            long pivot1 = a[e2];
-            long pivot2 = a[e4];
-
-            /*
-             * The first and the last elements to be sorted are moved to the
-             * locations formerly occupied by the pivots. When partitioning
-             * is complete, the pivots are swapped back into their final
-             * positions, and excluded from subsequent sorting.
-             */
-            a[e2] = a[left];
-            a[e4] = a[right];
-
-            /*
-             * Skip elements, which are less or greater than pivot values.
-             */
-            while (a[++less] < pivot1);
-            while (a[--great] > pivot2);
+        int[] run = null;
+        int high = low + size;
+        int count = 1, last = low;
+
+        /*
+         * Identify all possible runs.
+         */
+        for (int k = low + 1; k < high; ) {
 
             /*
-             * Partitioning:
-             *
-             *   left part           center part                   right part
-             * +--------------------------------------------------------------+
-             * |  < pivot1  |  pivot1 <= && <= pivot2  |    ?    |  > pivot2  |
-             * +--------------------------------------------------------------+
-             *               ^                          ^       ^
-             *               |                          |       |
-             *              less                        k     great
-             *
-             * Invariants:
-             *
-             *              all in (left, less)   < pivot1
-             *    pivot1 <= all in [less, k)     <= pivot2
-             *              all in (great, right) > pivot2
-             *
-             * Pointer k is the first index of ?-part.
+             * Find the end index of the current run.
              */
-            outer:
-            for (int k = less - 1; ++k <= great; ) {
-                long ak = a[k];
-                if (ak < pivot1) { // Move a[k] to left part
-                    a[k] = a[less];
-                    /*
-                     * Here and below we use "a[i] = b; i++;" instead
-                     * of "a[i++] = b;" due to performance issue.
-                     */
-                    a[less] = ak;
-                    ++less;
-                } else if (ak > pivot2) { // Move a[k] to right part
-                    while (a[great] > pivot2) {
-                        if (great-- == k) {
-                            break outer;
-                        }
-                    }
-                    if (a[great] < pivot1) { // a[great] <= pivot2
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // pivot1 <= a[great] <= pivot2
-                        a[k] = a[great];
-                    }
-                    /*
-                     * Here and below we use "a[i] = b; i--;" instead
-                     * of "a[i--] = b;" due to performance issue.
-                     */
-                    a[great] = ak;
-                    --great;
-                }
-            }
-
-            // Swap pivots into their final positions
-            a[left]  = a[less  - 1]; a[less  - 1] = pivot1;
-            a[right] = a[great + 1]; a[great + 1] = pivot2;
-
-            // Sort left and right parts recursively, excluding known pivots
-            sort(a, left, less - 2, leftmost);
-            sort(a, great + 2, right, false);
-
-            /*
-             * If center part is too large (comprises > 4/7 of the array),
-             * swap internal pivot values to ends.
-             */
-            if (less < e1 && e5 < great) {
-                /*
-                 * Skip elements, which are equal to pivot values.
-                 */
-                while (a[less] == pivot1) {
-                    ++less;
-                }
-
-                while (a[great] == pivot2) {
-                    --great;
+            if (a[k - 1] < a[k]) {
+
+                // Identify ascending sequence
+                while (++k < high && a[k - 1] <= a[k]);
+
+            } else if (a[k - 1] > a[k]) {
+
+                // Identify descending sequence
+                while (++k < high && a[k - 1] >= a[k]);
+
+                // Reverse into ascending order
+                for (int i = last - 1, j = k; ++i < --j && a[i] > a[j]; ) {
+                    int ai = a[i]; a[i] = a[j]; a[j] = ai;
                 }
-
-                /*
-                 * Partitioning:
-                 *
-                 *   left part         center part                  right part
-                 * +----------------------------------------------------------+
-                 * | == pivot1 |  pivot1 < && < pivot2  |    ?    | == pivot2 |
-                 * +----------------------------------------------------------+
-                 *              ^                        ^       ^
-                 *              |                        |       |
-                 *             less                      k     great
-                 *
-                 * Invariants:
-                 *
-                 *              all in (*,  less) == pivot1
-                 *     pivot1 < all in [less,  k)  < pivot2
-                 *              all in (great, *) == pivot2
-                 *
-                 * Pointer k is the first index of ?-part.
-                 */
-                outer:
-                for (int k = less - 1; ++k <= great; ) {
-                    long ak = a[k];
-                    if (ak == pivot1) { // Move a[k] to left part
-                        a[k] = a[less];
-                        a[less] = ak;
-                        ++less;
-                    } else if (ak == pivot2) { // Move a[k] to right part
-                        while (a[great] == pivot2) {
-                            if (great-- == k) {
-                                break outer;
-                            }
-                        }
-                        if (a[great] == pivot1) { // a[great] < pivot2
-                            a[k] = a[less];
-                            /*
-                             * Even though a[great] equals to pivot1, the
-                             * assignment a[less] = pivot1 may be incorrect,
-                             * if a[great] and pivot1 are floating-point zeros
-                             * of different signs. Therefore in float and
-                             * double sorting methods we have to use more
-                             * accurate assignment a[less] = a[great].
-                             */
-                            a[less] = pivot1;
-                            ++less;
-                        } else { // pivot1 < a[great] < pivot2
-                            a[k] = a[great];
-                        }
-                        a[great] = ak;
-                        --great;
-                    }
-                }
-            }
-
-            // Sort center part recursively
-            sort(a, less, great, false);
-
-        } else { // Partitioning with one pivot
-            /*
-             * Use the third of the five sorted elements as pivot.
-             * This value is inexpensive approximation of the median.
-             */
-            long pivot = a[e3];
-
-            /*
-             * Partitioning degenerates to the traditional 3-way
-             * (or "Dutch National Flag") schema:
-             *
-             *   left part    center part              right part
-             * +-------------------------------------------------+
-             * |  < pivot  |   == pivot   |     ?    |  > pivot  |
-             * +-------------------------------------------------+
-             *              ^              ^        ^
-             *              |              |        |
-             *             less            k      great
-             *
-             * Invariants:
-             *
-             *   all in (left, less)   < pivot
-             *   all in [less, k)     == pivot
-             *   all in (great, right) > pivot
-             *
-             * Pointer k is the first index of ?-part.
-             */
-            for (int k = less; k <= great; ++k) {
-                if (a[k] == pivot) {
+            } else { // Identify constant sequence
+                for (int ak = a[k]; ++k < high && ak == a[k]; );
+
+                if (k < high) {
                     continue;
                 }
-                long ak = a[k];
-                if (ak < pivot) { // Move a[k] to left part
-                    a[k] = a[less];
-                    a[less] = ak;
-                    ++less;
-                } else { // a[k] > pivot - Move a[k] to right part
-                    while (a[great] > pivot) {
-                        --great;
-                    }
-                    if (a[great] < pivot) { // a[great] <= pivot
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // a[great] == pivot
-                        /*
-                         * Even though a[great] equals to pivot, the
-                         * assignment a[k] = pivot may be incorrect,
-                         * if a[great] and pivot are floating-point
-                         * zeros of different signs. Therefore in float
-                         * and double sorting methods we have to use
-                         * more accurate assignment a[k] = a[great].
-                         */
-                        a[k] = pivot;
-                    }
-                    a[great] = ak;
-                    --great;
-                }
             }
 
             /*
-             * Sort left and right parts recursively.
-             * All elements from center part are equal
-             * and, therefore, already sorted.
+             * Check special cases.
              */
-            sort(a, left, less - 1, leftmost);
-            sort(a, great + 1, right, false);
+            if (run == null) {
+                if (k == high) {
+
+                    /*
+                     * The array is a monotonic sequence,
+                     * and therefore already sorted.
+                     */
+                    return true;
+                }
+
+                if (k - low < MIN_FIRST_RUN_SIZE) {
+
+                    /*
+                     * The first run is too small
+                     * to proceed with scanning.
+                     */
+                    return false;
+                }
+
+                run = new int[((size >> 10) | 0x7F) & 0x3FF];
+                run[0] = low;
+
+            } else if (a[last - 1] > a[last]) {
+
+                if (count > (k - low) >> MIN_FIRST_RUNS_FACTOR) {
+
+                    /*
+                     * The first runs are not long
+                     * enough to continue scanning.
+                     */
+                    return false;
+                }
+
+                if (++count == MAX_RUN_CAPACITY) {
+
+                    /*
+                     * The array is not highly structured.
+                     */
+                    return false;
+                }
+
+                if (count == run.length) {
+
+                    /*
+                     * Increase the capacity of the index array.
+                     */
+                    run = Arrays.copyOf(run, count << 1);
+                }
+            }
+            run[count] = (last = k);
+        }
+
+        /*
+         * Merge the runs of a highly structured array.
+         */
+        if (count > 1) {
+            int[] b; int offset = low;
+
+            if (sorter == null || (b = (int[]) sorter.b) == null) {
+                b = new int[size];
+            } else {
+                offset = sorter.offset;
+            }
+            mergeRuns(a, b, offset, 1, sorter != null, run, 0, count);
+        }
+        return true;
+    }
+
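
The scan above records only the start index of each run, reversing descending runs in place so that every recorded run is non-descending. A minimal sketch of the boundary detection alone (hypothetical names, not part of this changeset; it does not reverse descending runs, so a descending stretch shows up as a chain of short runs):

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical sketch: collect the start index of every maximal
    // non-descending run. The real code above also reverses descending
    // runs in place and bails out early when the runs are too short.
    public class RunScanDemo {

        static List<Integer> scanRuns(int[] a) {
            List<Integer> run = new ArrayList<>();
            run.add(0); // The first run starts at index 0

            for (int k = 1; k < a.length; ++k) {
                if (a[k - 1] > a[k]) { // A strict descent starts a new run
                    run.add(k);
                }
            }
            return run;
        }

        public static void main(String[] args) {
            int[] a = { 1, 3, 5, 2, 4, 4, 6, 0 };
            System.out.println(scanRuns(a)); // [0, 3, 7]
        }
    }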
+    /**
+     * Merges the specified runs.
+     *
+     * @param a the source array
+     * @param b the temporary buffer used in merging
+     * @param offset the start index in the source, inclusive
+     * @param aim specifies merging: to source ( > 0), buffer ( < 0) or any ( == 0)
+     * @param parallel indicates whether merging is performed in parallel
+     * @param run the start indexes of the runs, inclusive
+     * @param lo the start index of the first run, inclusive
+     * @param hi the start index of the last run, inclusive
+     * @return the destination where runs are merged
+     */
+    private static int[] mergeRuns(int[] a, int[] b, int offset,
+            int aim, boolean parallel, int[] run, int lo, int hi) {
+
+        if (hi - lo == 1) {
+            if (aim >= 0) {
+                return a;
+            }
+            for (int i = run[hi], j = i - offset, low = run[lo]; i > low;
+                b[--j] = a[--i]
+            );
+            return b;
+        }
+
+        /*
+         * Split into approximately equal parts.
+         */
+        int mi = lo, rmi = (run[lo] + run[hi]) >>> 1;
+        while (run[++mi + 1] <= rmi);
+
+        /*
+         * Merge the left and right parts.
+         */
+        int[] a1, a2;
+
+        if (parallel && hi - lo > MIN_RUN_COUNT) {
+            RunMerger merger = new RunMerger(a, b, offset, 0, run, mi, hi).forkMe();
+            a1 = mergeRuns(a, b, offset, -aim, true, run, lo, mi);
+            a2 = (int[]) merger.getDestination();
+        } else {
+            a1 = mergeRuns(a, b, offset, -aim, false, run, lo, mi);
+            a2 = mergeRuns(a, b, offset,    0, false, run, mi, hi);
+        }
+
+        int[] dst = a1 == a ? b : a;
+
+        int k   = a1 == a ? run[lo] - offset : run[lo];
+        int lo1 = a1 == b ? run[lo] - offset : run[lo];
+        int hi1 = a1 == b ? run[mi] - offset : run[mi];
+        int lo2 = a2 == b ? run[mi] - offset : run[mi];
+        int hi2 = a2 == b ? run[hi] - offset : run[hi];
+
+        if (parallel) {
+            new Merger(null, dst, k, a1, lo1, hi1, a2, lo2, hi2).invoke();
+        } else {
+            mergeParts(null, dst, k, a1, lo1, hi1, a2, lo2, hi2);
+        }
+        return dst;
+    }
+
+    /**
+     * Merges the sorted parts.
+     *
+     * @param merger parallel context
+     * @param dst the destination where parts are merged
+     * @param k the start index of the destination, inclusive
+     * @param a1 the first part
+     * @param lo1 the start index of the first part, inclusive
+     * @param hi1 the end index of the first part, exclusive
+     * @param a2 the second part
+     * @param lo2 the start index of the second part, inclusive
+     * @param hi2 the end index of the second part, exclusive
+     */
+    private static void mergeParts(Merger merger, int[] dst, int k,
+            int[] a1, int lo1, int hi1, int[] a2, int lo2, int hi2) {
+
+        if (merger != null && a1 == a2) {
+
+            while (true) {
+
+                /*
+                 * The first part must be larger.
+                 */
+                if (hi1 - lo1 < hi2 - lo2) {
+                    int lo = lo1; lo1 = lo2; lo2 = lo;
+                    int hi = hi1; hi1 = hi2; hi2 = hi;
+                }
+
+                /*
+                 * Small parts will be merged sequentially.
+                 */
+                if (hi1 - lo1 < MIN_PARALLEL_MERGE_PARTS_SIZE) {
+                    break;
+                }
+
+                /*
+                 * Find the median of the larger part.
+                 */
+                int mi1 = (lo1 + hi1) >>> 1;
+                int key = a1[mi1];
+                int mi2 = hi2;
+
+                /*
+                 * Partition the smaller part.
+                 */
+                for (int loo = lo2; loo < mi2; ) {
+                    int t = (loo + mi2) >>> 1;
+
+                    if (key > a2[t]) {
+                        loo = t + 1;
+                    } else {
+                        mi2 = t;
+                    }
+                }
+
+                int d = mi2 - lo2 + mi1 - lo1;
+
+                /*
+                 * Merge the right sub-parts in parallel.
+                 */
+                merger.forkMerger(dst, k + d, a1, mi1, hi1, a2, mi2, hi2);
+
+                /*
+                 * Process the left sub-parts.
+                 */
+                hi1 = mi1;
+                hi2 = mi2;
+            }
+        }
+
+        /*
+         * Merge small parts sequentially.
+         */
+        while (lo1 < hi1 && lo2 < hi2) {
+            dst[k++] = a1[lo1] < a2[lo2] ? a1[lo1++] : a2[lo2++];
+        }
+        if (dst != a1 || k < lo1) {
+            while (lo1 < hi1) {
+                dst[k++] = a1[lo1++];
+            }
+        }
+        if (dst != a2 || k < lo2) {
+            while (lo2 < hi2) {
+                dst[k++] = a2[lo2++];
+            }
+        }
+    }
+
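
The inner binary search above is a lower-bound search: it finds the first index in the smaller part whose element is not less than the median key of the larger part, so the left and right pairs of sub-parts can then be merged independently. Extracted as a standalone sketch (hypothetical names, not part of this changeset):

    // Hypothetical sketch of the split used above for parallel merging:
    // a lower-bound binary search over the smaller (sorted) part.
    public class MergeSplitDemo {

        // Returns the first index in a[lo, hi) whose element is >= key.
        static int lowerBound(int[] a, int lo, int hi, int key) {
            while (lo < hi) {
                int mid = (lo + hi) >>> 1;

                if (key > a[mid]) {
                    lo = mid + 1;
                } else {
                    hi = mid;
                }
            }
            return lo;
        }

        public static void main(String[] args) {
            int[] smaller = { 1, 4, 4, 8, 9 };
            // Elements before the returned index merge with the left
            // half of the larger part, the rest with the right half.
            System.out.println(lowerBound(smaller, 0, smaller.length, 4)); // 1
        }
    }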
+// [long]
+
+    /**
+     * Sorts the specified range of the array using parallel merge
+     * sort and/or Dual-Pivot Quicksort.
+     *
+     * To balance the faster splitting and parallelism of merge sort
+     * with the faster element partitioning of Quicksort, ranges are
+     * subdivided in tiers such that, if there is enough parallelism,
+     * the four-way parallel merge is started, still ensuring enough
+     * parallelism to process the partitions.
+     *
+     * @param a the array to be sorted
+     * @param parallelism the parallelism level
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(long[] a, int parallelism, int low, int high) {
+        int size = high - low;
+
+        if (parallelism > 1 && size > MIN_PARALLEL_SORT_SIZE) {
+            int depth = getDepth(parallelism, size >> 12);
+            long[] b = depth == 0 ? null : new long[size];
+            new Sorter(null, a, b, low, size, low, depth).invoke();
+        } else {
+            sort(null, a, 0, low, high);
         }
     }
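
The depth computed by getDepth (not shown in this hunk) decides how many levels of Sorter tasks are forked before partitioning proceeds sequentially. One plausible shape for such a computation, purely illustrative and not taken from this changeset: spend the parallelism budget and the scaled size together, one tier at a time.

    // Purely illustrative sketch, not taken from this changeset: derive
    // a fork depth from the parallelism level and the scaled range size.
    public class DepthSketch {

        static int getDepthSketch(int parallelism, int size) {
            int depth = 0;
            // Each extra tier of four-way merging needs both more
            // workers and more elements, so consume both together.
            while ((parallelism >>= 3) > 0 && (size >>= 2) > 0) {
                depth -= 2;
            }
            return depth;
        }

        public static void main(String[] args) {
            // The caller above passes size >> 12; with parallelism 16
            // and one million elements this sketch yields -2.
            System.out.println(getDepthSketch(16, 1_000_000 >> 12));
        }
    }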
 
     /**
-     * Sorts the specified range of the array using the given
-     * workspace array slice if possible for merging
+     * Sorts the specified array using the Dual-Pivot Quicksort and/or
+     * other sorts in special cases, possibly with parallel partitions.
      *
-     * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
-     */
-    static void sort(short[] a, int left, int right,
-                     short[] work, int workBase, int workLen) {
-        // Use counting sort on large arrays
-        if (right - left > COUNTING_SORT_THRESHOLD_FOR_SHORT_OR_CHAR) {
-            int[] count = new int[NUM_SHORT_VALUES];
-
-            for (int i = left - 1; ++i <= right;
-                count[a[i] - Short.MIN_VALUE]++
-            );
-            for (int i = NUM_SHORT_VALUES, k = right + 1; k > left; ) {
-                while (count[--i] == 0);
-                short value = (short) (i + Short.MIN_VALUE);
-                int s = count[i];
-
-                do {
-                    a[--k] = value;
-                } while (--s > 0);
-            }
-        } else { // Use Dual-Pivot Quicksort on small arrays
-            doSort(a, left, right, work, workBase, workLen);
-        }
-    }
-
-    /** The number of distinct short values. */
-    private static final int NUM_SHORT_VALUES = 1 << 16;
-
-    /**
-     * Sorts the specified range of the array.
-     *
+     * @param sorter parallel context
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param bits the combination of recursion depth and bit flag, where
+     *        the right bit "0" indicates that array is the leftmost part
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void doSort(short[] a, int left, int right,
-                               short[] work, int workBase, int workLen) {
-        // Use Quicksort on small arrays
-        if (right - left < QUICKSORT_THRESHOLD) {
-            sort(a, left, right, true);
-            return;
-        }
-
-        /*
-         * Index run[i] is the start of i-th run
-         * (ascending or descending sequence).
-         */
-        int[] run = new int[MAX_RUN_COUNT + 1];
-        int count = 0; run[0] = left;
-
-        // Check if the array is nearly sorted
-        for (int k = left; k < right; run[count] = k) {
-            // Equal items in the beginning of the sequence
-            while (k < right && a[k] == a[k + 1])
-                k++;
-            if (k == right) break;  // Sequence finishes with equal items
-            if (a[k] < a[k + 1]) { // ascending
-                while (++k <= right && a[k - 1] <= a[k]);
-            } else if (a[k] > a[k + 1]) { // descending
-                while (++k <= right && a[k - 1] >= a[k]);
-                // Transform into an ascending sequence
-                for (int lo = run[count] - 1, hi = k; ++lo < --hi; ) {
-                    short t = a[lo]; a[lo] = a[hi]; a[hi] = t;
-                }
+    static void sort(Sorter sorter, long[] a, int bits, int low, int high) {
+        while (true) {
+            int end = high - 1, size = high - low;
+
+            /*
+             * Run mixed insertion sort on small non-leftmost parts.
+             */
+            if (size < MAX_MIXED_INSERTION_SORT_SIZE + bits && (bits & 1) > 0) {
+                mixedInsertionSort(a, low, high - 3 * ((size >> 5) << 3), high);
+                return;
             }
 
-            // Merge a transformed descending sequence followed by an
-            // ascending sequence
-            if (run[count] > left && a[run[count]] >= a[run[count] - 1]) {
-                count--;
+            /*
+             * Invoke insertion sort on small leftmost part.
+             */
+            if (size < MAX_INSERTION_SORT_SIZE) {
+                insertionSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Check if the whole array or large non-leftmost
+             * parts are nearly sorted and then merge runs.
+             */
+            if ((bits == 0 || size > MIN_TRY_MERGE_SIZE && (bits & 1) > 0)
+                    && tryMergeRuns(sorter, a, low, size)) {
+                return;
+            }
+
+            /*
+             * Switch to heap sort if execution
+             * time is becoming quadratic.
+             */
+            if ((bits += DELTA) > MAX_RECURSION_DEPTH) {
+                heapSort(a, low, high);
+                return;
             }
 
             /*
-             * The array is not highly structured,
-             * use Quicksort instead of merge sort.
+             * Use an inexpensive approximation of the golden ratio
+             * (step is about 3/8 of the size) to select five sample
+             * elements and determine pivots.
+             */
+            int step = (size >> 3) * 3 + 3;
+
+            /*
+             * Five elements around (and including) the central element
+             * will be used for pivot selection as described below. The
+             * unequal choice of spacing these elements was empirically
+             * determined to work well on a wide variety of inputs.
              */
-            if (++count == MAX_RUN_COUNT) {
-                sort(a, left, right, true);
-                return;
+            int e1 = low + step;
+            int e5 = end - step;
+            int e3 = (e1 + e5) >>> 1;
+            int e2 = (e1 + e3) >>> 1;
+            int e4 = (e3 + e5) >>> 1;
+            long a3 = a[e3];
+
+            /*
+             * Sort these elements in place by the combination
+             * of 4-element sorting network and insertion sort.
+             *
+             *    5 ------o-----------o------------
+             *            |           |
+             *    4 ------|-----o-----o-----o------
+             *            |     |           |
+             *    2 ------o-----|-----o-----o------
+             *                  |     |
+             *    1 ------------o-----o------------
+             */
+            if (a[e5] < a[e2]) { long t = a[e5]; a[e5] = a[e2]; a[e2] = t; }
+            if (a[e4] < a[e1]) { long t = a[e4]; a[e4] = a[e1]; a[e1] = t; }
+            if (a[e5] < a[e4]) { long t = a[e5]; a[e5] = a[e4]; a[e4] = t; }
+            if (a[e2] < a[e1]) { long t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
+            if (a[e4] < a[e2]) { long t = a[e4]; a[e4] = a[e2]; a[e2] = t; }
+
+            if (a3 < a[e2]) {
+                if (a3 < a[e1]) {
+                    a[e3] = a[e2]; a[e2] = a[e1]; a[e1] = a3;
+                } else {
+                    a[e3] = a[e2]; a[e2] = a3;
+                }
+            } else if (a3 > a[e4]) {
+                if (a3 > a[e5]) {
+                    a[e3] = a[e4]; a[e4] = a[e5]; a[e5] = a3;
+                } else {
+                    a[e3] = a[e4]; a[e4] = a3;
+                }
             }
-        }
-
-        // These invariants should hold true:
-        //    run[0] = 0
-        //    run[<last>] = right + 1; (terminator)
-
-        if (count == 0) {
-            // A single equal run
-            return;
-        } else if (count == 1 && run[count] > right) {
-            // Either a single ascending or a transformed descending run.
-            // Always check that a final run is a proper terminator, otherwise
-            // we have an unterminated trailing run, to handle downstream.
-            return;
-        }
-        right++;
-        if (run[count] < right) {
-            // Corner case: the final run is not a terminator. This may happen
-            // if a final run is an equals run, or there is a single-element run
-            // at the end. Fix up by adding a proper terminator at the end.
-            // Note that we terminate with (right + 1), incremented earlier.
-            run[++count] = right;
-        }
-
-        // Determine alternation base for merge
-        byte odd = 0;
-        for (int n = 1; (n <<= 1) < count; odd ^= 1);
-
-        // Use or create temporary array b for merging
-        short[] b;                 // temp array; alternates with a
-        int ao, bo;              // array offsets from 'left'
-        int blen = right - left; // space needed for b
-        if (work == null || workLen < blen || workBase + blen > work.length) {
-            work = new short[blen];
-            workBase = 0;
-        }
-        if (odd == 0) {
-            System.arraycopy(a, left, work, workBase, blen);
-            b = a;
-            bo = 0;
-            a = work;
-            ao = workBase - left;
-        } else {
-            b = work;
-            ao = 0;
-            bo = workBase - left;
-        }
-
-        // Merging
-        for (int last; count > 1; count = last) {
-            for (int k = (last = 0) + 2; k <= count; k += 2) {
-                int hi = run[k], mi = run[k - 1];
-                for (int i = run[k - 2], p = i, q = mi; i < hi; ++i) {
-                    if (q >= hi || p < mi && a[p + ao] <= a[q + ao]) {
-                        b[i + bo] = a[p++ + ao];
-                    } else {
-                        b[i + bo] = a[q++ + ao];
+
+            // Pointers
+            int lower = low; // The index of the last element of the left part
+            int upper = end; // The index of the first element of the right part
+
+            /*
+             * Partitioning with 2 pivots in case of different elements.
+             */
+            if (a[e1] < a[e2] && a[e2] < a[e3] && a[e3] < a[e4] && a[e4] < a[e5]) {
+
+                /*
+                 * Use the first and fifth of the five sorted elements as
+                 * the pivots. These values are inexpensive approximations
+                 * of tertiles. Note that pivot1 < pivot2.
+                 */
+                long pivot1 = a[e1];
+                long pivot2 = a[e5];
+
+                /*
+                 * The first and the last elements to be sorted are moved
+                 * to the locations formerly occupied by the pivots. When
+                 * partitioning is completed, the pivots are swapped back
+                 * into their final positions and excluded from
+                 * subsequent sorting.
+                 */
+                a[e1] = a[lower];
+                a[e5] = a[upper];
+
+                /*
+                 * Skip elements that are less than or greater than the pivots.
+                 */
+                while (a[++lower] < pivot1);
+                while (a[--upper] > pivot2);
+
+                /*
+                 * Backward 3-interval partitioning
+                 *
+                 *   left part                 central part          right part
+                 * +------------------------------------------------------------+
+                 * |  < pivot1  |   ?   |  pivot1 <= && <= pivot2  |  > pivot2  |
+                 * +------------------------------------------------------------+
+                 *             ^       ^                            ^
+                 *             |       |                            |
+                 *           lower     k                          upper
+                 *
+                 * Invariants:
+                 *
+                 *              all in (low, lower] < pivot1
+                 *    pivot1 <= all in (k, upper)  <= pivot2
+                 *              all in [upper, end) > pivot2
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int unused = --lower, k = ++upper; --k > lower; ) {
+                    long ak = a[k];
+
+                    if (ak < pivot1) { // Move a[k] to the left side
+                        while (lower < k) {
+                            if (a[++lower] >= pivot1) {
+                                if (a[lower] > pivot2) {
+                                    a[k] = a[--upper];
+                                    a[upper] = a[lower];
+                                } else {
+                                    a[k] = a[lower];
+                                }
+                                a[lower] = ak;
+                                break;
+                            }
+                        }
+                    } else if (ak > pivot2) { // Move a[k] to the right side
+                        a[k] = a[--upper];
+                        a[upper] = ak;
                     }
                 }
-                run[++last] = hi;
+
+                /*
+                 * Swap the pivots into their final positions.
+                 */
+                a[low] = a[lower]; a[lower] = pivot1;
+                a[end] = a[upper]; a[upper] = pivot2;
+
+                /*
+                 * Sort non-left parts recursively (possibly in parallel),
+                 * excluding known pivots.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, lower + 1, upper);
+                    sorter.forkSorter(bits | 1, upper + 1, high);
+                } else {
+                    sort(sorter, a, bits | 1, lower + 1, upper);
+                    sort(sorter, a, bits | 1, upper + 1, high);
+                }
+
+            } else { // Use single pivot in case of many equal elements
+
+                /*
+                 * Use the third of the five sorted elements as the pivot.
+                 * This value is an inexpensive approximation of the median.
+                 */
+                long pivot = a[e3];
+
+                /*
+                 * The first element to be sorted is moved to the
+                 * location formerly occupied by the pivot. After
+                 * completion of partitioning, the pivot is swapped
+                 * back into its final position and excluded from
+                 * subsequent sorting.
+                 */
+                a[e3] = a[lower];
+
+                /*
+                 * Traditional 3-way (Dutch National Flag) partitioning
+                 *
+                 *   left part                 central part    right part
+                 * +------------------------------------------------------+
+                 * |   < pivot   |     ?     |   == pivot   |   > pivot   |
+                 * +------------------------------------------------------+
+                 *              ^           ^                ^
+                 *              |           |                |
+                 *            lower         k              upper
+                 *
+                 * Invariants:
+                 *
+                 *   all in (low, lower] < pivot
+                 *   all in (k, upper)  == pivot
+                 *   all in [upper, end] > pivot
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int k = ++upper; --k > lower; ) {
+                    long ak = a[k];
+
+                    if (ak != pivot) {
+                        a[k] = pivot;
+
+                        if (ak < pivot) { // Move a[k] to the left side
+                            while (a[++lower] < pivot);
+
+                            if (a[lower] > pivot) {
+                                a[--upper] = a[lower];
+                            }
+                            a[lower] = ak;
+                        } else { // ak > pivot - Move a[k] to the right side
+                            a[--upper] = ak;
+                        }
+                    }
+                }
+
+                /*
+                 * Swap the pivot into its final position.
+                 */
+                a[low] = a[lower]; a[lower] = pivot;
+
+                /*
+                 * Sort the right part (possibly in parallel), excluding
+                 * known pivot. All elements from the central part are
+                 * equal and therefore already sorted.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, upper, high);
+                } else {
+                    sort(sorter, a, bits | 1, upper, high);
+                }
             }
-            if ((count & 1) != 0) {
-                for (int i = right, lo = run[count - 1]; --i >= lo;
-                    b[i + bo] = a[i + ao]
-                );
-                run[++last] = right;
-            }
-            short[] t = a; a = b; b = t;
-            int o = ao; ao = bo; bo = o;
+            high = lower; // Iterate along the left part
         }
     }
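
A brief sketch of what the bits parameter encodes (DELTA and
MAX_RECURSION_DEPTH are constants defined elsewhere in this file):

    int bits = 0;                  // top-level call: depth 0, leftmost part
    bits += DELTA;                 // each partitioning step adds DELTA; once
                                   // bits > MAX_RECURSION_DEPTH, heap sort runs
    int child = bits | 1;          // children set the low bit: not leftmost,
                                   // so the left pivot can act as a sentinel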
 
     /**
-     * Sorts the specified range of the array by Dual-Pivot Quicksort.
+     * Sorts the specified range of the array using mixed insertion sort.
+     *
+     * Mixed insertion sort is a combination of simple insertion sort,
+     * pin insertion sort and pair insertion sort.
+     *
+     * In the context of Dual-Pivot Quicksort, the pivot element
+     * from the left part plays the role of a sentinel, because it
+     * is less than any element from the given part. Therefore,
+     * the expensive check of the left range can be skipped on
+     * each iteration unless it is the leftmost call.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param leftmost indicates if this part is the leftmost in the range
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param end the index of the last element for simple insertion sort
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void sort(short[] a, int left, int right, boolean leftmost) {
-        int length = right - left + 1;
-
-        // Use insertion sort on tiny arrays
-        if (length < INSERTION_SORT_THRESHOLD) {
-            if (leftmost) {
-                /*
-                 * Traditional (without sentinel) insertion sort,
-                 * optimized for server VM, is used in case of
-                 * the leftmost part.
-                 */
-                for (int i = left, j = i; i < right; j = ++i) {
-                    short ai = a[i + 1];
-                    while (ai < a[j]) {
-                        a[j + 1] = a[j];
-                        if (j-- == left) {
-                            break;
-                        }
-                    }
-                    a[j + 1] = ai;
-                }
-            } else {
-                /*
-                 * Skip the longest ascending sequence.
-                 */
-                do {
-                    if (left >= right) {
-                        return;
-                    }
-                } while (a[++left] >= a[left - 1]);
-
-                /*
-                 * Every element from adjoining part plays the role
-                 * of sentinel, therefore this allows us to avoid the
-                 * left range check on each iteration. Moreover, we use
-                 * the more optimized algorithm, so called pair insertion
-                 * sort, which is faster (in the context of Quicksort)
-                 * than traditional implementation of insertion sort.
-                 */
-                for (int k = left; ++left <= right; k = ++left) {
-                    short a1 = a[k], a2 = a[left];
-
-                    if (a1 < a2) {
-                        a2 = a1; a1 = a[left];
-                    }
-                    while (a1 < a[--k]) {
-                        a[k + 2] = a[k];
-                    }
-                    a[++k + 1] = a1;
-
-                    while (a2 < a[--k]) {
-                        a[k + 1] = a[k];
-                    }
-                    a[k + 1] = a2;
-                }
-                short last = a[right];
-
-                while (last < a[--right]) {
-                    a[right + 1] = a[right];
-                }
-                a[right + 1] = last;
-            }
-            return;
-        }
-
-        // Inexpensive approximation of length / 7
-        int seventh = (length >> 3) + (length >> 6) + 1;
-
-        /*
-         * Sort five evenly spaced elements around (and including) the
-         * center element in the range. These elements will be used for
-         * pivot selection as described below. The choice for spacing
-         * these elements was empirically determined to work well on
-         * a wide variety of inputs.
-         */
-        int e3 = (left + right) >>> 1; // The midpoint
-        int e2 = e3 - seventh;
-        int e1 = e2 - seventh;
-        int e4 = e3 + seventh;
-        int e5 = e4 + seventh;
-
-        // Sort these elements using insertion sort
-        if (a[e2] < a[e1]) { short t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
-
-        if (a[e3] < a[e2]) { short t = a[e3]; a[e3] = a[e2]; a[e2] = t;
-            if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-        }
-        if (a[e4] < a[e3]) { short t = a[e4]; a[e4] = a[e3]; a[e3] = t;
-            if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-            }
-        }
-        if (a[e5] < a[e4]) { short t = a[e5]; a[e5] = a[e4]; a[e4] = t;
-            if (t < a[e3]) { a[e4] = a[e3]; a[e3] = t;
-                if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                    if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-                }
-            }
-        }
-
-        // Pointers
-        int less  = left;  // The index of the first element of center part
-        int great = right; // The index before the first element of right part
-
-        if (a[e1] != a[e2] && a[e2] != a[e3] && a[e3] != a[e4] && a[e4] != a[e5]) {
-            /*
-             * Use the second and fourth of the five sorted elements as pivots.
-             * These values are inexpensive approximations of the first and
-             * second terciles of the array. Note that pivot1 <= pivot2.
-             */
-            short pivot1 = a[e2];
-            short pivot2 = a[e4];
+    private static void mixedInsertionSort(long[] a, int low, int end, int high) {
+        if (end == high) {
 
             /*
-             * The first and the last elements to be sorted are moved to the
-             * locations formerly occupied by the pivots. When partitioning
-             * is complete, the pivots are swapped back into their final
-             * positions, and excluded from subsequent sorting.
+             * Invoke simple insertion sort on the tiny array.
              */
-            a[e2] = a[left];
-            a[e4] = a[right];
-
-            /*
-             * Skip elements, which are less or greater than pivot values.
-             */
-            while (a[++less] < pivot1);
-            while (a[--great] > pivot2);
+            for (int i; ++low < end; ) {
+                long ai = a[i = low];
+
+                while (ai < a[--i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        } else {
 
             /*
-             * Partitioning:
-             *
-             *   left part           center part                   right part
-             * +--------------------------------------------------------------+
-             * |  < pivot1  |  pivot1 <= && <= pivot2  |    ?    |  > pivot2  |
-             * +--------------------------------------------------------------+
-             *               ^                          ^       ^
-             *               |                          |       |
-             *              less                        k     great
-             *
-             * Invariants:
-             *
-             *              all in (left, less)   < pivot1
-             *    pivot1 <= all in [less, k)     <= pivot2
-             *              all in (great, right) > pivot2
+             * Start with pin insertion sort on the small part.
              *
-             * Pointer k is the first index of ?-part.
-             */
-            outer:
-            for (int k = less - 1; ++k <= great; ) {
-                short ak = a[k];
-                if (ak < pivot1) { // Move a[k] to left part
-                    a[k] = a[less];
-                    /*
-                     * Here and below we use "a[i] = b; i++;" instead
-                     * of "a[i++] = b;" due to performance issue.
-                     */
-                    a[less] = ak;
-                    ++less;
-                } else if (ak > pivot2) { // Move a[k] to right part
-                    while (a[great] > pivot2) {
-                        if (great-- == k) {
-                            break outer;
-                        }
-                    }
-                    if (a[great] < pivot1) { // a[great] <= pivot2
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // pivot1 <= a[great] <= pivot2
-                        a[k] = a[great];
-                    }
-                    /*
-                     * Here and below we use "a[i] = b; i--;" instead
-                     * of "a[i--] = b;" due to performance issue.
-                     */
-                    a[great] = ak;
-                    --great;
-                }
-            }
-
-            // Swap pivots into their final positions
-            a[left]  = a[less  - 1]; a[less  - 1] = pivot1;
-            a[right] = a[great + 1]; a[great + 1] = pivot2;
-
-            // Sort left and right parts recursively, excluding known pivots
-            sort(a, left, less - 2, leftmost);
-            sort(a, great + 2, right, false);
-
-            /*
-             * If center part is too large (comprises > 4/7 of the array),
-             * swap internal pivot values to ends.
+             * Pin insertion sort is an extended simple insertion sort.
+             * The main idea of this sort is to put elements larger
+             * than an element called the pin to the end of the array
+             * (the proper area for such elements). It avoids expensive
+             * movements of these elements through the whole array.
              */
-            if (less < e1 && e5 < great) {
-                /*
-                 * Skip elements, which are equal to pivot values.
-                 */
-                while (a[less] == pivot1) {
-                    ++less;
-                }
-
-                while (a[great] == pivot2) {
-                    --great;
-                }
-
-                /*
-                 * Partitioning:
-                 *
-                 *   left part         center part                  right part
-                 * +----------------------------------------------------------+
-                 * | == pivot1 |  pivot1 < && < pivot2  |    ?    | == pivot2 |
-                 * +----------------------------------------------------------+
-                 *              ^                        ^       ^
-                 *              |                        |       |
-                 *             less                      k     great
-                 *
-                 * Invariants:
-                 *
-                 *              all in (*,  less) == pivot1
-                 *     pivot1 < all in [less,  k)  < pivot2
-                 *              all in (great, *) == pivot2
-                 *
-                 * Pointer k is the first index of ?-part.
-                 */
-                outer:
-                for (int k = less - 1; ++k <= great; ) {
-                    short ak = a[k];
-                    if (ak == pivot1) { // Move a[k] to left part
-                        a[k] = a[less];
-                        a[less] = ak;
-                        ++less;
-                    } else if (ak == pivot2) { // Move a[k] to right part
-                        while (a[great] == pivot2) {
-                            if (great-- == k) {
-                                break outer;
-                            }
-                        }
-                        if (a[great] == pivot1) { // a[great] < pivot2
-                            a[k] = a[less];
-                            /*
-                             * Even though a[great] equals to pivot1, the
-                             * assignment a[less] = pivot1 may be incorrect,
-                             * if a[great] and pivot1 are floating-point zeros
-                             * of different signs. Therefore in float and
-                             * double sorting methods we have to use more
-                             * accurate assignment a[less] = a[great].
-                             */
-                            a[less] = pivot1;
-                            ++less;
-                        } else { // pivot1 < a[great] < pivot2
-                            a[k] = a[great];
-                        }
-                        a[great] = ak;
-                        --great;
+            long pin = a[end];
+
+            for (int i, p = high; ++low < end; ) {
+                long ai = a[i = low];
+
+                if (ai < a[i - 1]) { // Small element
+
+                    /*
+                     * Insert small element into sorted part.
+                     */
+                    a[i] = a[--i];
+
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
                     }
-                }
-            }
-
-            // Sort center part recursively
-            sort(a, less, great, false);
-
-        } else { // Partitioning with one pivot
-            /*
-             * Use the third of the five sorted elements as pivot.
-             * This value is inexpensive approximation of the median.
-             */
-            short pivot = a[e3];
-
-            /*
-             * Partitioning degenerates to the traditional 3-way
-             * (or "Dutch National Flag") schema:
-             *
-             *   left part    center part              right part
-             * +-------------------------------------------------+
-             * |  < pivot  |   == pivot   |     ?    |  > pivot  |
-             * +-------------------------------------------------+
-             *              ^              ^        ^
-             *              |              |        |
-             *             less            k      great
-             *
-             * Invariants:
-             *
-             *   all in (left, less)   < pivot
-             *   all in [less, k)     == pivot
-             *   all in (great, right) > pivot
-             *
-             * Pointer k is the first index of ?-part.
-             */
-            for (int k = less; k <= great; ++k) {
-                if (a[k] == pivot) {
-                    continue;
-                }
-                short ak = a[k];
-                if (ak < pivot) { // Move a[k] to left part
-                    a[k] = a[less];
-                    a[less] = ak;
-                    ++less;
-                } else { // a[k] > pivot - Move a[k] to right part
-                    while (a[great] > pivot) {
-                        --great;
+                    a[i + 1] = ai;
+
+                } else if (p > i && ai > pin) { // Large element
+
+                    /*
+                     * Find element smaller than pin.
+                     */
+                    while (a[--p] > pin);
+
+                    /*
+                     * Swap it with large element.
+                     */
+                    if (p > i) {
+                        ai = a[p];
+                        a[p] = a[i];
                     }
-                    if (a[great] < pivot) { // a[great] <= pivot
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // a[great] == pivot
-                        /*
-                         * Even though a[great] equals to pivot, the
-                         * assignment a[k] = pivot may be incorrect,
-                         * if a[great] and pivot are floating-point
-                         * zeros of different signs. Therefore in float
-                         * and double sorting methods we have to use
-                         * more accurate assignment a[k] = a[great].
-                         */
-                        a[k] = pivot;
+
+                    /*
+                     * Insert small element into sorted part.
+                     */
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
                     }
-                    a[great] = ak;
-                    --great;
+                    a[i + 1] = ai;
                 }
             }
 
             /*
-             * Sort left and right parts recursively.
-             * All elements from center part are equal
-             * and, therefore, already sorted.
+             * Continue with pair insertion sort on the remaining part.
              */
-            sort(a, left, less - 1, leftmost);
-            sort(a, great + 1, right, false);
+            for (int i; low < high; ++low) {
+                long a1 = a[i = low], a2 = a[++low];
+
+                /*
+                 * Insert two elements per iteration: first insert the
+                 * larger element, then insert the smaller element, starting
+                 * from the position where the larger element was inserted.
+                 */
+                if (a1 > a2) {
+
+                    while (a1 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a1;
+
+                    while (a2 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a2;
+
+                } else if (a1 < a[i - 1]) {
+
+                    while (a2 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a2;
+
+                    while (a1 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a1;
+                }
+            }
+        }
+    }
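
A worked example of the split computed at the call site in sort (numbers
chosen for illustration): with low = 0 and high = 64, size is 64, so

    int end = high - 3 * ((size >> 5) << 3);   // 64 - 3 * 16 == 16

and pin insertion sort covers roughly the first quarter of the range while
pair insertion sort finishes the remaining three quarters.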
+
+    /**
+     * Sorts the specified range of the array using insertion sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void insertionSort(long[] a, int low, int high) {
+        for (int i, k = low; ++k < high; ) {
+            long ai = a[i = k];
+
+            if (ai < a[i - 1]) {
+                while (--i >= low && ai < a[i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        }
+    }
+
+    /**
+     * Sorts the specified range of the array using heap sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void heapSort(long[] a, int low, int high) {
+        for (int k = (low + high) >>> 1; k > low; ) {
+            pushDown(a, --k, a[k], low, high);
+        }
+        while (--high > low) {
+            long max = a[low];
+            pushDown(a, low, a[high], low, high);
+            a[high] = max;
         }
     }
 
     /**
-     * Sorts the specified range of the array using the given
-     * workspace array slice if possible for merging
+     * Pushes specified element down during heap sort.
      *
-     * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param a the given array
+     * @param p the start index
+     * @param value the given element
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    static void sort(char[] a, int left, int right,
-                     char[] work, int workBase, int workLen) {
-        // Use counting sort on large arrays
-        if (right - left > COUNTING_SORT_THRESHOLD_FOR_SHORT_OR_CHAR) {
-            int[] count = new int[NUM_CHAR_VALUES];
-
-            for (int i = left - 1; ++i <= right;
-                count[a[i]]++
-            );
-            for (int i = NUM_CHAR_VALUES, k = right + 1; k > left; ) {
-                while (count[--i] == 0);
-                char value = (char) i;
-                int s = count[i];
-
-                do {
-                    a[--k] = value;
-                } while (--s > 0);
+    private static void pushDown(long[] a, int p, long value, int low, int high) {
+        for (int k ;; a[p] = a[p = k]) {
+            k = (p << 1) - low + 2; // Index of the right child
+
+            if (k > high) {
+                break;
             }
-        } else { // Use Dual-Pivot Quicksort on small arrays
-            doSort(a, left, right, work, workBase, workLen);
+            if (k == high || a[k] < a[k - 1]) {
+                --k;
+            }
+            if (a[k] <= value) {
+                break;
+            }
         }
+        a[p] = value;
     }
 
-    /** The number of distinct char values. */
-    private static final int NUM_CHAR_VALUES = 1 << 16;
-
     /**
-     * Sorts the specified range of the array.
+     * Tries to sort the specified range of the array.
      *
+     * @param sorter parallel context
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param low the index of the first element to be sorted
+     * @param size the size of the range to be sorted
+     * @return true if the range was fully sorted, false otherwise
      */
-    private static void doSort(char[] a, int left, int right,
-                               char[] work, int workBase, int workLen) {
-        // Use Quicksort on small arrays
-        if (right - left < QUICKSORT_THRESHOLD) {
-            sort(a, left, right, true);
-            return;
+    private static boolean tryMergeRuns(Sorter sorter, long[] a, int low, int size) {
+
+        /*
+         * The run array is constructed only if initial runs are
+         * long enough to continue; run[i] then holds the start index
+         * of the i-th sequence of elements in non-descending order.
+         */
+        int[] run = null;
+        int high = low + size;
+        int count = 1, last = low;
+
+        /*
+         * Identify all possible runs.
+         */
+        for (int k = low + 1; k < high; ) {
+
+            /*
+             * Find the end index of the current run.
+             */
+            if (a[k - 1] < a[k]) {
+
+                // Identify ascending sequence
+                while (++k < high && a[k - 1] <= a[k]);
+
+            } else if (a[k - 1] > a[k]) {
+
+                // Identify descending sequence
+                while (++k < high && a[k - 1] >= a[k]);
+
+                // Reverse into ascending order
+                for (int i = last - 1, j = k; ++i < --j && a[i] > a[j]; ) {
+                    long ai = a[i]; a[i] = a[j]; a[j] = ai;
+                }
+            } else { // Identify constant sequence
+                for (long ak = a[k]; ++k < high && ak == a[k]; );
+
+                if (k < high) {
+                    continue;
+                }
+            }
+
+            /*
+             * Check special cases.
+             */
+            if (run == null) {
+                if (k == high) {
+
+                    /*
+                     * The array is a monotonic sequence,
+                     * and therefore already sorted.
+                     */
+                    return true;
+                }
+
+                if (k - low < MIN_FIRST_RUN_SIZE) {
+
+                    /*
+                     * The first run is too small
+                     * to proceed with scanning.
+                     */
+                    return false;
+                }
+
+                run = new int[((size >> 10) | 0x7F) & 0x3FF];
+                run[0] = low;
+
+            } else if (a[last - 1] > a[last]) {
+
+                if (count > (k - low) >> MIN_FIRST_RUNS_FACTOR) {
+
+                    /*
+                     * The first runs are not long
+                     * enough to continue scanning.
+                     */
+                    return false;
+                }
+
+                if (++count == MAX_RUN_CAPACITY) {
+
+                    /*
+                     * The array is not highly structured.
+                     */
+                    return false;
+                }
+
+                if (count == run.length) {
+
+                    /*
+                     * Increase capacity of index array.
+                     */
+                    run = Arrays.copyOf(run, count << 1);
+                }
+            }
+            run[count] = (last = k);
         }
 
         /*
-         * Index run[i] is the start of i-th run
-         * (ascending or descending sequence).
+         * Merge runs of highly structured array.
+         */
+        if (count > 1) {
+            long[] b; int offset = low;
+
+            if (sorter == null || (b = (long[]) sorter.b) == null) {
+                b = new long[size];
+            } else {
+                offset = sorter.offset;
+            }
+            mergeRuns(a, b, offset, 1, sorter != null, run, 0, count);
+        }
+        return true;
+    }
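
Some shapes of input that this scan resolves without ever partitioning
(tiny sizes for readability only; the run path is tried just for the whole
array or for large non-leftmost parts):

    long[] oneRun   = {1, 2, 3, 4, 5, 6, 7, 8};  // monotonic: return true early
    long[] reversed = {8, 7, 6, 5, 4, 3, 2, 1};  // one descending run, reversed in place
    long[] twoRuns  = {1, 3, 5, 7, 2, 4, 6, 8};  // two ascending runs, handed to mergeRuns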
+
+    /**
+     * Merges the specified runs.
+     *
+     * @param a the source array
+     * @param b the temporary buffer used in merging
+     * @param offset the start index in the source, inclusive
+     * @param aim specifies merging: to source ( > 0), buffer ( < 0) or any ( == 0)
+     * @param parallel indicates whether merging is performed in parallel
+     * @param run the start indexes of the runs, inclusive
+     * @param lo the start index of the first run, inclusive
+     * @param hi the start index of the last run, inclusive
+     * @return the destination where runs are merged
+     */
+    private static long[] mergeRuns(long[] a, long[] b, int offset,
+            int aim, boolean parallel, int[] run, int lo, int hi) {
+
+        if (hi - lo == 1) {
+            if (aim >= 0) {
+                return a;
+            }
+            for (int i = run[hi], j = i - offset, low = run[lo]; i > low;
+                b[--j] = a[--i]
+            );
+            return b;
+        }
+
+        /*
+         * Split into approximately equal parts.
+         */
+        int mi = lo, rmi = (run[lo] + run[hi]) >>> 1;
+        while (run[++mi + 1] <= rmi);
+
+        /*
+         * Merge the left and right parts.
          */
-        int[] run = new int[MAX_RUN_COUNT + 1];
-        int count = 0; run[0] = left;
-
-        // Check if the array is nearly sorted
-        for (int k = left; k < right; run[count] = k) {
-            // Equal items in the beginning of the sequence
-            while (k < right && a[k] == a[k + 1])
-                k++;
-            if (k == right) break;  // Sequence finishes with equal items
-            if (a[k] < a[k + 1]) { // ascending
-                while (++k <= right && a[k - 1] <= a[k]);
-            } else if (a[k] > a[k + 1]) { // descending
-                while (++k <= right && a[k - 1] >= a[k]);
-                // Transform into an ascending sequence
-                for (int lo = run[count] - 1, hi = k; ++lo < --hi; ) {
-                    char t = a[lo]; a[lo] = a[hi]; a[hi] = t;
+        long[] a1, a2;
+
+        if (parallel && hi - lo > MIN_RUN_COUNT) {
+            RunMerger merger = new RunMerger(a, b, offset, 0, run, mi, hi).forkMe();
+            a1 = mergeRuns(a, b, offset, -aim, true, run, lo, mi);
+            a2 = (long[]) merger.getDestination();
+        } else {
+            a1 = mergeRuns(a, b, offset, -aim, false, run, lo, mi);
+            a2 = mergeRuns(a, b, offset,    0, false, run, mi, hi);
+        }
+
+        long[] dst = a1 == a ? b : a;
+
+        int k   = a1 == a ? run[lo] - offset : run[lo];
+        int lo1 = a1 == b ? run[lo] - offset : run[lo];
+        int hi1 = a1 == b ? run[mi] - offset : run[mi];
+        int lo2 = a2 == b ? run[mi] - offset : run[mi];
+        int hi2 = a2 == b ? run[hi] - offset : run[hi];
+
+        if (parallel) {
+            new Merger(null, dst, k, a1, lo1, hi1, a2, lo2, hi2).invoke();
+        } else {
+            mergeParts(null, dst, k, a1, lo1, hi1, a2, lo2, hi2);
+        }
+        return dst;
+    }
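
The aim contract can be checked by induction on the recursion: aim > 0
forces the merged result into the source array, aim < 0 into the buffer,
and aim == 0 accepts either; each recursive call flips the sign, so
successive levels ping-pong between a and b. The top-level call in
tryMergeRuns therefore always leaves the data back in the source:

    long[] dst = mergeRuns(a, b, offset, 1, false, run, 0, count);
    // dst == a here, because aim == 1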
+
+    /**
+     * Merges the sorted parts.
+     *
+     * @param merger parallel context
+     * @param dst the destination where parts are merged
+     * @param k the start index of the destination, inclusive
+     * @param a1 the first part
+     * @param lo1 the start index of the first part, inclusive
+     * @param hi1 the end index of the first part, exclusive
+     * @param a2 the second part
+     * @param lo2 the start index of the second part, inclusive
+     * @param hi2 the end index of the second part, exclusive
+     */
+    private static void mergeParts(Merger merger, long[] dst, int k,
+            long[] a1, int lo1, int hi1, long[] a2, int lo2, int hi2) {
+
+        if (merger != null && a1 == a2) {
+
+            while (true) {
+
+                /*
+                 * The first part must be larger.
+                 */
+                if (hi1 - lo1 < hi2 - lo2) {
+                    int lo = lo1; lo1 = lo2; lo2 = lo;
+                    int hi = hi1; hi1 = hi2; hi2 = hi;
                 }
-            }
-
-            // Merge a transformed descending sequence followed by an
-            // ascending sequence
-            if (run[count] > left && a[run[count]] >= a[run[count] - 1]) {
-                count--;
-            }
-
-            /*
-             * The array is not highly structured,
-             * use Quicksort instead of merge sort.
-             */
-            if (++count == MAX_RUN_COUNT) {
-                sort(a, left, right, true);
-                return;
+
+                /*
+                 * Small parts will be merged sequentially.
+                 */
+                if (hi1 - lo1 < MIN_PARALLEL_MERGE_PARTS_SIZE) {
+                    break;
+                }
+
+                /*
+                 * Find the median of the larger part.
+                 */
+                int mi1 = (lo1 + hi1) >>> 1;
+                long key = a1[mi1];
+                int mi2 = hi2;
+
+                /*
+                 * Partition the smaller part.
+                 */
+                for (int loo = lo2; loo < mi2; ) {
+                    int t = (loo + mi2) >>> 1;
+
+                    if (key > a2[t]) {
+                        loo = t + 1;
+                    } else {
+                        mi2 = t;
+                    }
+                }
+
+                int d = mi2 - lo2 + mi1 - lo1;
+
+                /*
+                 * Merge the right sub-parts in parallel.
+                 */
+                merger.forkMerger(dst, k + d, a1, mi1, hi1, a2, mi2, hi2);
+
+                /*
+                 * Process the left sub-parts.
+                 */
+                hi1 = mi1;
+                hi2 = mi2;
             }
         }
 
-        // These invariants should hold true:
-        //    run[0] = 0
-        //    run[<last>] = right + 1; (terminator)
-
-        if (count == 0) {
-            // A single equal run
-            return;
-        } else if (count == 1 && run[count] > right) {
-            // Either a single ascending or a transformed descending run.
-            // Always check that a final run is a proper terminator, otherwise
-            // we have an unterminated trailing run, to handle downstream.
-            return;
+        /*
+         * Merge small parts sequentially.
+         */
+        while (lo1 < hi1 && lo2 < hi2) {
+            dst[k++] = a1[lo1] < a2[lo2] ? a1[lo1++] : a2[lo2++];
         }
-        right++;
-        if (run[count] < right) {
-            // Corner case: the final run is not a terminator. This may happen
-            // if a final run is an equals run, or there is a single-element run
-            // at the end. Fix up by adding a proper terminator at the end.
-            // Note that we terminate with (right + 1), incremented earlier.
-            run[++count] = right;
+        if (dst != a1 || k < lo1) {
+            while (lo1 < hi1) {
+                dst[k++] = a1[lo1++];
+            }
         }
-
-        // Determine alternation base for merge
-        byte odd = 0;
-        for (int n = 1; (n <<= 1) < count; odd ^= 1);
-
-        // Use or create temporary array b for merging
-        char[] b;                 // temp array; alternates with a
-        int ao, bo;              // array offsets from 'left'
-        int blen = right - left; // space needed for b
-        if (work == null || workLen < blen || workBase + blen > work.length) {
-            work = new char[blen];
-            workBase = 0;
+        if (dst != a2 || k < lo2) {
+            while (lo2 < hi2) {
+                dst[k++] = a2[lo2++];
+            }
         }
-        if (odd == 0) {
-            System.arraycopy(a, left, work, workBase, blen);
-            b = a;
-            bo = 0;
-            a = work;
-            ao = workBase - left;
+    }
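
A worked example of the parallel split above, for two sorted parts of the
same backing array (hypothetical values): let a1[lo1..hi1) hold
{10, 20, 30, 40} and a2[lo2..hi2) hold {15, 25, 35} with lo1 = lo2 = 0.
Then mi1 = 2 and key = 30; the binary search leaves mi2 = 2, since
a2[0..2) = {15, 25} lie below the key and a2[2] = 35 does not. With
d = (2 - 0) + (2 - 0) = 4, the forked merger handles {30, 40} and {35}
into dst[k + 4 ..), while this call continues with {10, 20} and {15, 25}
into dst[k .. k + 4).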
+
+// [byte]
+
+    /**
+     * Sorts the specified range of the array using
+     * counting sort or insertion sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(byte[] a, int low, int high) {
+        if (high - low > MIN_BYTE_COUNTING_SORT_SIZE) {
+            countingSort(a, low, high);
         } else {
-            b = work;
-            ao = 0;
-            bo = workBase - left;
-        }
-
-        // Merging
-        for (int last; count > 1; count = last) {
-            for (int k = (last = 0) + 2; k <= count; k += 2) {
-                int hi = run[k], mi = run[k - 1];
-                for (int i = run[k - 2], p = i, q = mi; i < hi; ++i) {
-                    if (q >= hi || p < mi && a[p + ao] <= a[q + ao]) {
-                        b[i + bo] = a[p++ + ao];
-                    } else {
-                        b[i + bo] = a[q++ + ao];
-                    }
-                }
-                run[++last] = hi;
-            }
-            if ((count & 1) != 0) {
-                for (int i = right, lo = run[count - 1]; --i >= lo;
-                    b[i + bo] = a[i + ao]
-                );
-                run[++last] = right;
-            }
-            char[] t = a; a = b; b = t;
-            int o = ao; ao = bo; bo = o;
+            insertionSort(a, low, high);
         }
     }
 
     /**
-     * Sorts the specified range of the array by Dual-Pivot Quicksort.
+     * Sorts the specified range of the array using insertion sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void insertionSort(byte[] a, int low, int high) {
+        for (int i, k = low; ++k < high; ) {
+            byte ai = a[i = k];
+
+            if (ai < a[i - 1]) {
+                while (--i >= low && ai < a[i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        }
+    }
+
+    /**
+     * The number of distinct byte values.
+     */
+    private static final int NUM_BYTE_VALUES = 1 << 8;
+
+    /**
+     * Max index of byte counter.
+     */
+    private static final int MAX_BYTE_INDEX = Byte.MAX_VALUE + NUM_BYTE_VALUES + 1;
+
+    /**
+     * Sorts the specified range of the array using counting sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void countingSort(byte[] a, int low, int high) {
+        int[] count = new int[NUM_BYTE_VALUES];
+
+        /*
+         * Compute a histogram with the count of each value.
+         */
+        for (int i = high; i > low; ++count[a[--i] & 0xFF]);
+
+        /*
+         * Place values in their final positions.
+         */
+        if (high - low > NUM_BYTE_VALUES) {
+            for (int i = MAX_BYTE_INDEX; --i > Byte.MAX_VALUE; ) {
+                int value = i & 0xFF;
+
+                for (low = high - count[value]; high > low;
+                    a[--high] = (byte) value
+                );
+            }
+        } else {
+            for (int i = MAX_BYTE_INDEX; high > low; ) {
+                while (count[--i & 0xFF] == 0);
+
+                int value = i & 0xFF;
+                int c = count[value];
+
+                do {
+                    a[--high] = (byte) value;
+                } while (--c > 0);
+            }
+        }
+    }
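
The & 0xFF mask lets one array of 256 counters cover both signs; a quick
worked check (illustrative):

    // byte -3 has bit pattern 0xFD, so (-3 & 0xFF) == 253: negative values
    // occupy counters 128..255 and non-negative values occupy 0..127.
    // Scanning i from MAX_BYTE_INDEX - 1 == 383 down to 128 then visits the
    // signed values 127..0 first, followed by -1 down to -128, matching the
    // back-to-front fill of the array.
    byte b = -3;
    assert (b & 0xFF) == 253;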
+
+// [char]
+
+    /**
+     * Sorts the specified range of the array using
+     * counting sort or Dual-Pivot Quicksort.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param leftmost indicates if this part is the leftmost in the range
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(char[] a, int low, int high) {
+        if (high - low > MIN_SHORT_OR_CHAR_COUNTING_SORT_SIZE) {
+            countingSort(a, low, high);
+        } else {
+            sort(a, 0, low, high);
+        }
+    }
+
+    /**
+     * Sorts the specified array using Dual-Pivot Quicksort and/or
+     * other sorts in special cases, possibly with parallel partitions.
+     *
+     * @param a the array to be sorted
+     * @param bits the combination of recursion depth and bit flag, where
+     *        the right bit "0" indicates that array is the leftmost part
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void sort(char[] a, int left, int right, boolean leftmost) {
-        int length = right - left + 1;
-
-        // Use insertion sort on tiny arrays
-        if (length < INSERTION_SORT_THRESHOLD) {
-            if (leftmost) {
-                /*
-                 * Traditional (without sentinel) insertion sort,
-                 * optimized for server VM, is used in case of
-                 * the leftmost part.
-                 */
-                for (int i = left, j = i; i < right; j = ++i) {
-                    char ai = a[i + 1];
-                    while (ai < a[j]) {
-                        a[j + 1] = a[j];
-                        if (j-- == left) {
-                            break;
-                        }
-                    }
-                    a[j + 1] = ai;
+    static void sort(char[] a, int bits, int low, int high) {
+        while (true) {
+            int end = high - 1, size = high - low;
+
+            /*
+             * Invoke insertion sort on small leftmost part.
+             */
+            if (size < MAX_INSERTION_SORT_SIZE) {
+                insertionSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Switch to counting sort if execution
+             * time is becoming quadratic.
+             */
+            if ((bits += DELTA) > MAX_RECURSION_DEPTH) {
+                countingSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Use an inexpensive approximation of the golden ratio
+             * (step is about 3/8 of the size) to select five sample
+             * elements and determine pivots.
+             */
+            int step = (size >> 3) * 3 + 3;
+
+            /*
+             * Five elements around (and including) the central element
+             * will be used for pivot selection as described below. The
+             * unequal choice of spacing these elements was empirically
+             * determined to work well on a wide variety of inputs.
+             */
+            int e1 = low + step;
+            int e5 = end - step;
+            int e3 = (e1 + e5) >>> 1;
+            int e2 = (e1 + e3) >>> 1;
+            int e4 = (e3 + e5) >>> 1;
+            char a3 = a[e3];
+
+            /*
+             * Sort these elements in place by the combination
+             * of 4-element sorting network and insertion sort.
+             *
+             *    5 ------o-----------o------------
+             *            |           |
+             *    4 ------|-----o-----o-----o------
+             *            |     |           |
+             *    2 ------o-----|-----o-----o------
+             *                  |     |
+             *    1 ------------o-----o------------
+             */
+            if (a[e5] < a[e2]) { char t = a[e5]; a[e5] = a[e2]; a[e2] = t; }
+            if (a[e4] < a[e1]) { char t = a[e4]; a[e4] = a[e1]; a[e1] = t; }
+            if (a[e5] < a[e4]) { char t = a[e5]; a[e5] = a[e4]; a[e4] = t; }
+            if (a[e2] < a[e1]) { char t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
+            if (a[e4] < a[e2]) { char t = a[e4]; a[e4] = a[e2]; a[e2] = t; }
+
+            if (a3 < a[e2]) {
+                if (a3 < a[e1]) {
+                    a[e3] = a[e2]; a[e2] = a[e1]; a[e1] = a3;
+                } else {
+                    a[e3] = a[e2]; a[e2] = a3;
                 }
-            } else {
-                /*
-                 * Skip the longest ascending sequence.
-                 */
-                do {
-                    if (left >= right) {
-                        return;
-                    }
-                } while (a[++left] >= a[left - 1]);
-
-                /*
-                 * Every element from adjoining part plays the role
-                 * of sentinel, therefore this allows us to avoid the
-                 * left range check on each iteration. Moreover, we use
-                 * the more optimized algorithm, so called pair insertion
-                 * sort, which is faster (in the context of Quicksort)
-                 * than traditional implementation of insertion sort.
-                 */
-                for (int k = left; ++left <= right; k = ++left) {
-                    char a1 = a[k], a2 = a[left];
-
-                    if (a1 < a2) {
-                        a2 = a1; a1 = a[left];
-                    }
-                    while (a1 < a[--k]) {
-                        a[k + 2] = a[k];
-                    }
-                    a[++k + 1] = a1;
-
-                    while (a2 < a[--k]) {
-                        a[k + 1] = a[k];
-                    }
-                    a[k + 1] = a2;
-                }
-                char last = a[right];
-
-                while (last < a[--right]) {
-                    a[right + 1] = a[right];
-                }
-                a[right + 1] = last;
-            }
-            return;
-        }
-
-        // Inexpensive approximation of length / 7
-        int seventh = (length >> 3) + (length >> 6) + 1;
-
-        /*
-         * Sort five evenly spaced elements around (and including) the
-         * center element in the range. These elements will be used for
-         * pivot selection as described below. The choice for spacing
-         * these elements was empirically determined to work well on
-         * a wide variety of inputs.
-         */
-        int e3 = (left + right) >>> 1; // The midpoint
-        int e2 = e3 - seventh;
-        int e1 = e2 - seventh;
-        int e4 = e3 + seventh;
-        int e5 = e4 + seventh;
-
-        // Sort these elements using insertion sort
-        if (a[e2] < a[e1]) { char t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
-
-        if (a[e3] < a[e2]) { char t = a[e3]; a[e3] = a[e2]; a[e2] = t;
-            if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-        }
-        if (a[e4] < a[e3]) { char t = a[e4]; a[e4] = a[e3]; a[e3] = t;
-            if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-            }
-        }
-        if (a[e5] < a[e4]) { char t = a[e5]; a[e5] = a[e4]; a[e4] = t;
-            if (t < a[e3]) { a[e4] = a[e3]; a[e3] = t;
-                if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                    if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
+            } else if (a3 > a[e4]) {
+                if (a3 > a[e5]) {
+                    a[e3] = a[e4]; a[e4] = a[e5]; a[e5] = a3;
+                } else {
+                    a[e3] = a[e4]; a[e4] = a3;
                 }
             }
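+
+            /*
+             * After the five exchanges above the sampled elements
+             * satisfy a[e1] <= a[e2] <= a[e4] <= a[e5], and the saved
+             * central element a3 has been inserted among them, so the
+             * whole sample is sorted:
+             *
+             *     a[e1] <= a[e2] <= a[e3] <= a[e4] <= a[e5]
+             */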
-        }
-
-        // Pointers
-        int less  = left;  // The index of the first element of center part
-        int great = right; // The index before the first element of right part
-
-        if (a[e1] != a[e2] && a[e2] != a[e3] && a[e3] != a[e4] && a[e4] != a[e5]) {
-            /*
-             * Use the second and fourth of the five sorted elements as pivots.
-             * These values are inexpensive approximations of the first and
-             * second terciles of the array. Note that pivot1 <= pivot2.
-             */
-            char pivot1 = a[e2];
-            char pivot2 = a[e4];
-
-            /*
-             * The first and the last elements to be sorted are moved to the
-             * locations formerly occupied by the pivots. When partitioning
-             * is complete, the pivots are swapped back into their final
-             * positions, and excluded from subsequent sorting.
-             */
-            a[e2] = a[left];
-            a[e4] = a[right];
-
-            /*
-             * Skip elements, which are less or greater than pivot values.
-             */
-            while (a[++less] < pivot1);
-            while (a[--great] > pivot2);
+
+            // Pointers
+            int lower = low; // The index of the last element of the left part
+            int upper = end; // The index of the first element of the right part
 
             /*
-             * Partitioning:
-             *
-             *   left part           center part                   right part
-             * +--------------------------------------------------------------+
-             * |  < pivot1  |  pivot1 <= && <= pivot2  |    ?    |  > pivot2  |
-             * +--------------------------------------------------------------+
-             *               ^                          ^       ^
-             *               |                          |       |
-             *              less                        k     great
-             *
-             * Invariants:
-             *
-             *              all in (left, less)   < pivot1
-             *    pivot1 <= all in [less, k)     <= pivot2
-             *              all in (great, right) > pivot2
-             *
-             * Pointer k is the first index of ?-part.
+             * Partitioning with two pivots in the case of distinct elements.
              */
-            outer:
-            for (int k = less - 1; ++k <= great; ) {
-                char ak = a[k];
-                if (ak < pivot1) { // Move a[k] to left part
-                    a[k] = a[less];
-                    /*
-                     * Here and below we use "a[i] = b; i++;" instead
-                     * of "a[i++] = b;" due to performance issue.
-                     */
-                    a[less] = ak;
-                    ++less;
-                } else if (ak > pivot2) { // Move a[k] to right part
-                    while (a[great] > pivot2) {
-                        if (great-- == k) {
-                            break outer;
+            if (a[e1] < a[e2] && a[e2] < a[e3] && a[e3] < a[e4] && a[e4] < a[e5]) {
+
+                /*
+                 * Use the first and fifth of the five sorted elements as
+                 * the pivots. These values are inexpensive approximations
+                 * of the terciles. Note that pivot1 < pivot2.
+                 */
+                char pivot1 = a[e1];
+                char pivot2 = a[e5];
+
+                /*
+                 * The first and the last elements to be sorted are moved
+                 * to the locations formerly occupied by the pivots. When
+                 * partitioning is completed, the pivots are swapped back
+                 * into their final positions, and excluded from
+                 * subsequent sorting.
+                 */
+                a[e1] = a[lower];
+                a[e5] = a[upper];
+
+                /*
+                 * Skip elements that are less than or greater than the pivots.
+                 */
+                while (a[++lower] < pivot1);
+                while (a[--upper] > pivot2);
+
+                /*
+                 * Backward 3-interval partitioning
+                 *
+                 *   left part                 central part          right part
+                 * +------------------------------------------------------------+
+                 * |  < pivot1  |   ?   |  pivot1 <= && <= pivot2  |  > pivot2  |
+                 * +------------------------------------------------------------+
+                 *             ^       ^                            ^
+                 *             |       |                            |
+                 *           lower     k                          upper
+                 *
+                 * Invariants:
+                 *
+                 *              all in (low, lower] < pivot1
+                 *    pivot1 <= all in (k, upper)  <= pivot2
+                 *              all in [upper, end) > pivot2
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
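+                /*
+                 * The variable "unused" exists only so that the loop
+                 * initializer can also decrement lower; it is never
+                 * read. Pointer k then scans backward toward lower,
+                 * exchanging smaller elements into the left part and
+                 * larger ones into the right part.
+                 */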
+                for (int unused = --lower, k = ++upper; --k > lower; ) {
+                    char ak = a[k];
+
+                    if (ak < pivot1) { // Move a[k] to the left side
+                        while (lower < k) {
+                            if (a[++lower] >= pivot1) {
+                                if (a[lower] > pivot2) {
+                                    a[k] = a[--upper];
+                                    a[upper] = a[lower];
+                                } else {
+                                    a[k] = a[lower];
+                                }
+                                a[lower] = ak;
+                                break;
+                            }
                         }
+                    } else if (ak > pivot2) { // Move a[k] to the right side
+                        a[k] = a[--upper];
+                        a[upper] = ak;
                     }
-                    if (a[great] < pivot1) { // a[great] <= pivot2
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // pivot1 <= a[great] <= pivot2
-                        a[k] = a[great];
-                    }
-                    /*
-                     * Here and below we use "a[i] = b; i--;" instead
-                     * of "a[i--] = b;" due to performance issue.
-                     */
-                    a[great] = ak;
-                    --great;
-                }
-            }
-
-            // Swap pivots into their final positions
-            a[left]  = a[less  - 1]; a[less  - 1] = pivot1;
-            a[right] = a[great + 1]; a[great + 1] = pivot2;
-
-            // Sort left and right parts recursively, excluding known pivots
-            sort(a, left, less - 2, leftmost);
-            sort(a, great + 2, right, false);
-
-            /*
-             * If center part is too large (comprises > 4/7 of the array),
-             * swap internal pivot values to ends.
-             */
-            if (less < e1 && e5 < great) {
-                /*
-                 * Skip elements, which are equal to pivot values.
-                 */
-                while (a[less] == pivot1) {
-                    ++less;
-                }
-
-                while (a[great] == pivot2) {
-                    --great;
                 }
 
                 /*
-                 * Partitioning:
+                 * Swap the pivots into their final positions.
+                 */
+                a[low] = a[lower]; a[lower] = pivot1;
+                a[end] = a[upper]; a[upper] = pivot2;
+
+                /*
+                 * Sort non-left parts recursively,
+                 * excluding known pivots.
+                 */
+                sort(a, bits | 1, lower + 1, upper);
+                sort(a, bits | 1, upper + 1, high);
+
+            } else { // Use a single pivot in the case of many equal elements
+
+                /*
+                 * Use the third of the five sorted elements as the pivot.
+                 * This value is an inexpensive approximation of the median.
+                 */
+                char pivot = a[e3];
+
+                /*
+                 * The first element to be sorted is moved to the
+                 * location formerly occupied by the pivot. After
+                 * completion of partitioning the pivot is swapped
+                 * back into its final position, and excluded
+                 * from subsequent sorting.
+                 */
+                a[e3] = a[lower];
+
+                /*
+                 * Traditional 3-way (Dutch National Flag) partitioning
                  *
-                 *   left part         center part                  right part
-                 * +----------------------------------------------------------+
-                 * | == pivot1 |  pivot1 < && < pivot2  |    ?    | == pivot2 |
-                 * +----------------------------------------------------------+
-                 *              ^                        ^       ^
-                 *              |                        |       |
-                 *             less                      k     great
+                 *   left part                 central part    right part
+                 * +------------------------------------------------------+
+                 * |   < pivot   |     ?     |   == pivot   |   > pivot   |
+                 * +------------------------------------------------------+
+                 *              ^           ^                ^
+                 *              |           |                |
+                 *            lower         k              upper
                  *
                  * Invariants:
                  *
-                 *              all in (*,  less) == pivot1
-                 *     pivot1 < all in [less,  k)  < pivot2
-                 *              all in (great, *) == pivot2
+                 *   all in (low, lower] < pivot
+                 *   all in (k, upper)  == pivot
+                 *   all in [upper, end] > pivot
                  *
-                 * Pointer k is the first index of ?-part.
+                 * Pointer k is the last index of ?-part
                  */
-                outer:
-                for (int k = less - 1; ++k <= great; ) {
+                for (int k = ++upper; --k > lower; ) {
                     char ak = a[k];
-                    if (ak == pivot1) { // Move a[k] to left part
-                        a[k] = a[less];
-                        a[less] = ak;
-                        ++less;
-                    } else if (ak == pivot2) { // Move a[k] to right part
-                        while (a[great] == pivot2) {
-                            if (great-- == k) {
-                                break outer;
+
+                    if (ak != pivot) {
+                        a[k] = pivot;
+
+                        if (ak < pivot) { // Move a[k] to the left side
+                            while (a[++lower] < pivot);
+
+                            if (a[lower] > pivot) {
+                                a[--upper] = a[lower];
                             }
+                            a[lower] = ak;
+                        } else { // ak > pivot - Move a[k] to the right side
+                            a[--upper] = ak;
                         }
-                        if (a[great] == pivot1) { // a[great] < pivot2
-                            a[k] = a[less];
-                            /*
-                             * Even though a[great] equals to pivot1, the
-                             * assignment a[less] = pivot1 may be incorrect,
-                             * if a[great] and pivot1 are floating-point zeros
-                             * of different signs. Therefore in float and
-                             * double sorting methods we have to use more
-                             * accurate assignment a[less] = a[great].
-                             */
-                            a[less] = pivot1;
-                            ++less;
-                        } else { // pivot1 < a[great] < pivot2
-                            a[k] = a[great];
-                        }
-                        a[great] = ak;
-                        --great;
                     }
                 }
+
+                /*
+                 * Swap the pivot into its final position.
+                 */
+                a[low] = a[lower]; a[lower] = pivot;
+
+                /*
+                 * Sort the right part, excluding the known pivot.
+                 * All elements from the central part are
+                 * equal and therefore already sorted.
+                 */
+                sort(a, bits | 1, upper, high);
             }
-
-            // Sort center part recursively
-            sort(a, less, great, false);
-
-        } else { // Partitioning with one pivot
-            /*
-             * Use the third of the five sorted elements as pivot.
-             * This value is inexpensive approximation of the median.
-             */
-            char pivot = a[e3];
-
-            /*
-             * Partitioning degenerates to the traditional 3-way
-             * (or "Dutch National Flag") schema:
-             *
-             *   left part    center part              right part
-             * +-------------------------------------------------+
-             * |  < pivot  |   == pivot   |     ?    |  > pivot  |
-             * +-------------------------------------------------+
-             *              ^              ^        ^
-             *              |              |        |
-             *             less            k      great
-             *
-             * Invariants:
-             *
-             *   all in (left, less)   < pivot
-             *   all in [less, k)     == pivot
-             *   all in (great, right) > pivot
-             *
-             * Pointer k is the first index of ?-part.
-             */
-            for (int k = less; k <= great; ++k) {
-                if (a[k] == pivot) {
-                    continue;
-                }
-                char ak = a[k];
-                if (ak < pivot) { // Move a[k] to left part
-                    a[k] = a[less];
-                    a[less] = ak;
-                    ++less;
-                } else { // a[k] > pivot - Move a[k] to right part
-                    while (a[great] > pivot) {
-                        --great;
-                    }
-                    if (a[great] < pivot) { // a[great] <= pivot
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // a[great] == pivot
-                        /*
-                         * Even though a[great] equals to pivot, the
-                         * assignment a[k] = pivot may be incorrect,
-                         * if a[great] and pivot are floating-point
-                         * zeros of different signs. Therefore in float
-                         * and double sorting methods we have to use
-                         * more accurate assignment a[k] = a[great].
-                         */
-                        a[k] = pivot;
-                    }
-                    a[great] = ak;
-                    --great;
-                }
-            }
-
-            /*
-             * Sort left and right parts recursively.
-             * All elements from center part are equal
-             * and, therefore, already sorted.
-             */
-            sort(a, left, less - 1, leftmost);
-            sort(a, great + 1, right, false);
+            high = lower; // Iterate along the left part
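+
+            /*
+             * Only the left part loops back here; the other parts
+             * were handled by the recursive calls above. Iterating
+             * instead of recursing on the left part saves one call
+             * frame per partitioning level.
+             */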
         }
     }
 
-    /** The number of distinct byte values. */
-    private static final int NUM_BYTE_VALUES = 1 << 8;
-
     /**
-     * Sorts the specified range of the array.
+     * Sorts the specified range of the array using insertion sort.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    static void sort(byte[] a, int left, int right) {
-        // Use counting sort on large arrays
-        if (right - left > COUNTING_SORT_THRESHOLD_FOR_BYTE) {
-            int[] count = new int[NUM_BYTE_VALUES];
-
-            for (int i = left - 1; ++i <= right;
-                count[a[i] - Byte.MIN_VALUE]++
-            );
-            for (int i = NUM_BYTE_VALUES, k = right + 1; k > left; ) {
-                while (count[--i] == 0);
-                byte value = (byte) (i + Byte.MIN_VALUE);
-                int s = count[i];
-
-                do {
-                    a[--k] = value;
-                } while (--s > 0);
-            }
-        } else { // Use insertion sort on small arrays
-            for (int i = left, j = i; i < right; j = ++i) {
-                byte ai = a[i + 1];
-                while (ai < a[j]) {
-                    a[j + 1] = a[j];
-                    if (j-- == left) {
-                        break;
-                    }
+    private static void insertionSort(char[] a, int low, int high) {
+        for (int i, k = low; ++k < high; ) {
+            char ai = a[i = k];
+
+            if (ai < a[i - 1]) {
+                while (--i >= low && ai < a[i]) {
+                    a[i + 1] = a[i];
                 }
-                a[j + 1] = ai;
+                a[i + 1] = ai;
             }
         }
     }
 
     /**
-     * Sorts the specified range of the array using the given
-     * workspace array slice if possible for merging
+     * The number of distinct char values.
+     */
+    private static final int NUM_CHAR_VALUES = 1 << 16;
+
+    /**
+     * Sorts the specified range of the array using counting sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void countingSort(char[] a, int low, int high) {
+        int[] count = new int[NUM_CHAR_VALUES];
+
+        /*
+         * Compute a histogram with the count of each value.
+         */
+        for (int i = high; i > low; ++count[a[--i]]);
+
+        /*
+         * Place values in their final positions.
+         */
+        if (high - low > NUM_CHAR_VALUES) {
+            for (int i = NUM_CHAR_VALUES; i > 0; ) {
+                for (low = high - count[--i]; high > low;
+                    a[--high] = (char) i
+                );
+            }
+        } else {
+            for (int i = NUM_CHAR_VALUES; high > low; ) {
+                while (count[--i] == 0);
+                int c = count[i];
+
+                do {
+                    a[--high] = (char) i;
+                } while (--c > 0);
+            }
+        }
+    }
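+
+    /*
+     * Worked example (values chosen for illustration): for
+     * a = { 'b', 'a', 'c', 'a' }, low = 0, high = 4 the histogram is
+     * count['a'] = 2, count['b'] = 1, count['c'] = 1. The range is
+     * shorter than NUM_CHAR_VALUES, so the second branch walks the
+     * buckets downward, skipping empty ones, and writes the runs from
+     * the top: a becomes { 'a', 'a', 'b', 'c' }. The first branch
+     * visits all 65536 buckets unconditionally, which pays off only
+     * when the range itself is longer than that.
+     */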
+
+// [short]
+
+    /**
+     * Sorts the specified range of the array using
+     * counting sort or Dual-Pivot Quicksort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(short[] a, int low, int high) {
+        if (high - low > MIN_SHORT_OR_CHAR_COUNTING_SORT_SIZE) {
+            countingSort(a, low, high);
+        } else {
+            sort(a, 0, low, high);
+        }
+    }
+
+    /**
+     * Sorts the specified array using the Dual-Pivot Quicksort and/or
+     * other sorts in special cases, possibly with parallel partitions.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param bits the combination of recursion depth and bit flag, where
+     *        the lowest bit "0" indicates that the array is the leftmost part
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    static void sort(float[] a, int left, int right,
-                     float[] work, int workBase, int workLen) {
+    static void sort(short[] a, int bits, int low, int high) {
+        while (true) {
+            int end = high - 1, size = high - low;
+
+            /*
+             * Invoke insertion sort on small leftmost part.
+             */
+            if (size < MAX_INSERTION_SORT_SIZE) {
+                insertionSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Switch to counting sort if execution
+             * time is becoming quadratic.
+             */
+            if ((bits += DELTA) > MAX_RECURSION_DEPTH) {
+                countingSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Use an inexpensive approximation of the golden ratio
+             * to select five sample elements and determine pivots.
+             */
+            int step = (size >> 3) * 3 + 3;
+
+            /*
+             * Five elements around (and including) the central element
+             * will be used for pivot selection as described below. The
+             * unequal spacing of these elements was empirically
+             * determined to work well on a wide variety of inputs.
+             */
+            int e1 = low + step;
+            int e5 = end - step;
+            int e3 = (e1 + e5) >>> 1;
+            int e2 = (e1 + e3) >>> 1;
+            int e4 = (e3 + e5) >>> 1;
+            short a3 = a[e3];
+
+            /*
+             * Sort these elements in place by the combination
+             * of 4-element sorting network and insertion sort.
+             *
+             *    5 ------o-----------o------------
+             *            |           |
+             *    4 ------|-----o-----o-----o------
+             *            |     |           |
+             *    2 ------o-----|-----o-----o------
+             *                  |     |
+             *    1 ------------o-----o------------
+             */
+            if (a[e5] < a[e2]) { short t = a[e5]; a[e5] = a[e2]; a[e2] = t; }
+            if (a[e4] < a[e1]) { short t = a[e4]; a[e4] = a[e1]; a[e1] = t; }
+            if (a[e5] < a[e4]) { short t = a[e5]; a[e5] = a[e4]; a[e4] = t; }
+            if (a[e2] < a[e1]) { short t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
+            if (a[e4] < a[e2]) { short t = a[e4]; a[e4] = a[e2]; a[e2] = t; }
+
+            if (a3 < a[e2]) {
+                if (a3 < a[e1]) {
+                    a[e3] = a[e2]; a[e2] = a[e1]; a[e1] = a3;
+                } else {
+                    a[e3] = a[e2]; a[e2] = a3;
+                }
+            } else if (a3 > a[e4]) {
+                if (a3 > a[e5]) {
+                    a[e3] = a[e4]; a[e4] = a[e5]; a[e5] = a3;
+                } else {
+                    a[e3] = a[e4]; a[e4] = a3;
+                }
+            }
+
+            // Pointers
+            int lower = low; // The index of the last element of the left part
+            int upper = end; // The index of the first element of the right part
+
+            /*
+             * Partitioning with two pivots in the case of distinct elements.
+             */
+            if (a[e1] < a[e2] && a[e2] < a[e3] && a[e3] < a[e4] && a[e4] < a[e5]) {
+
+                /*
+                 * Use the first and fifth of the five sorted elements as
+                 * the pivots. These values are inexpensive approximations
+                 * of the terciles. Note that pivot1 < pivot2.
+                 */
+                short pivot1 = a[e1];
+                short pivot2 = a[e5];
+
+                /*
+                 * The first and the last elements to be sorted are moved
+                 * to the locations formerly occupied by the pivots. When
+                 * partitioning is completed, the pivots are swapped back
+                 * into their final positions, and excluded from
+                 * subsequent sorting.
+                 */
+                a[e1] = a[lower];
+                a[e5] = a[upper];
+
+                /*
+                 * Skip elements that are less than or greater than the pivots.
+                 */
+                while (a[++lower] < pivot1);
+                while (a[--upper] > pivot2);
+
+                /*
+                 * Backward 3-interval partitioning
+                 *
+                 *   left part                 central part          right part
+                 * +------------------------------------------------------------+
+                 * |  < pivot1  |   ?   |  pivot1 <= && <= pivot2  |  > pivot2  |
+                 * +------------------------------------------------------------+
+                 *             ^       ^                            ^
+                 *             |       |                            |
+                 *           lower     k                          upper
+                 *
+                 * Invariants:
+                 *
+                 *              all in (low, lower] < pivot1
+                 *    pivot1 <= all in (k, upper)  <= pivot2
+                 *              all in [upper, end) > pivot2
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int unused = --lower, k = ++upper; --k > lower; ) {
+                    short ak = a[k];
+
+                    if (ak < pivot1) { // Move a[k] to the left side
+                        while (lower < k) {
+                            if (a[++lower] >= pivot1) {
+                                if (a[lower] > pivot2) {
+                                    a[k] = a[--upper];
+                                    a[upper] = a[lower];
+                                } else {
+                                    a[k] = a[lower];
+                                }
+                                a[lower] = ak;
+                                break;
+                            }
+                        }
+                    } else if (ak > pivot2) { // Move a[k] to the right side
+                        a[k] = a[--upper];
+                        a[upper] = ak;
+                    }
+                }
+
+                /*
+                 * Swap the pivots into their final positions.
+                 */
+                a[low] = a[lower]; a[lower] = pivot1;
+                a[end] = a[upper]; a[upper] = pivot2;
+
+                /*
+                 * Sort non-left parts recursively,
+                 * excluding known pivots.
+                 */
+                sort(a, bits | 1, lower + 1, upper);
+                sort(a, bits | 1, upper + 1, high);
+
+            } else { // Use a single pivot in the case of many equal elements
+
+                /*
+                 * Use the third of the five sorted elements as the pivot.
+                 * This value is an inexpensive approximation of the median.
+                 */
+                short pivot = a[e3];
+
+                /*
+                 * The first element to be sorted is moved to the
+                 * location formerly occupied by the pivot. After
+                 * completion of partitioning the pivot is swapped
+                 * back into its final position, and excluded
+                 * from subsequent sorting.
+                 */
+                a[e3] = a[lower];
+
+                /*
+                 * Traditional 3-way (Dutch National Flag) partitioning
+                 *
+                 *   left part                 central part    right part
+                 * +------------------------------------------------------+
+                 * |   < pivot   |     ?     |   == pivot   |   > pivot   |
+                 * +------------------------------------------------------+
+                 *              ^           ^                ^
+                 *              |           |                |
+                 *            lower         k              upper
+                 *
+                 * Invariants:
+                 *
+                 *   all in (low, lower] < pivot
+                 *   all in (k, upper)  == pivot
+                 *   all in [upper, end] > pivot
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int k = ++upper; --k > lower; ) {
+                    short ak = a[k];
+
+                    if (ak != pivot) {
+                        a[k] = pivot;
+
+                        if (ak < pivot) { // Move a[k] to the left side
+                            while (a[++lower] < pivot);
+
+                            if (a[lower] > pivot) {
+                                a[--upper] = a[lower];
+                            }
+                            a[lower] = ak;
+                        } else { // ak > pivot - Move a[k] to the right side
+                            a[--upper] = ak;
+                        }
+                    }
+                }
+
+                /*
+                 * Swap the pivot into its final position.
+                 */
+                a[low] = a[lower]; a[lower] = pivot;
+
+                /*
+                 * Sort the right part, excluding the known pivot.
+                 * All elements from the central part are
+                 * equal and therefore already sorted.
+                 */
+                sort(a, bits | 1, upper, high);
+            }
+            high = lower; // Iterate along the left part
+        }
+    }
+
+    /**
+     * Sorts the specified range of the array using insertion sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void insertionSort(short[] a, int low, int high) {
+        for (int i, k = low; ++k < high; ) {
+            short ai = a[i = k];
+
+            if (ai < a[i - 1]) {
+                while (--i >= low && ai < a[i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        }
+    }
+
+    /**
+     * The number of distinct short values.
+     */
+    private static final int NUM_SHORT_VALUES = 1 << 16;
+
+    /**
+     * Max index of short counter.
+     */
+    private static final int MAX_SHORT_INDEX = Short.MAX_VALUE + NUM_SHORT_VALUES + 1;
+
+    /**
+     * Sorts the specified range of the array using counting sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void countingSort(short[] a, int low, int high) {
+        int[] count = new int[NUM_SHORT_VALUES];
+
         /*
-         * Phase 1: Move NaNs to the end of the array.
+         * Compute a histogram with the count of each value.
+         */
+        for (int i = high; i > low; ++count[a[--i] & 0xFFFF]);
+
+        /*
+         * Place values in their final positions.
          */
-        while (left <= right && Float.isNaN(a[right])) {
-            --right;
+        if (high - low > NUM_SHORT_VALUES) {
+            for (int i = MAX_SHORT_INDEX; --i > Short.MAX_VALUE; ) {
+                int value = i & 0xFFFF;
+
+                for (low = high - count[value]; high > low;
+                    a[--high] = (short) value
+                );
+            }
+        } else {
+            for (int i = MAX_SHORT_INDEX; high > low; ) {
+                while (count[--i & 0xFFFF] == 0);
+
+                int value = i & 0xFFFF;
+                int c = count[value];
+
+                do {
+                    a[--high] = (short) value;
+                } while (--c > 0);
+            }
         }
-        for (int k = right; --k >= left; ) {
-            float ak = a[k];
-            if (ak != ak) { // a[k] is NaN
-                a[k] = a[right];
-                a[right] = ak;
-                --right;
+    }
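+
+    /*
+     * The masking above encodes each short into an unsigned bucket,
+     * e.g. (short) -1 lands in count[65535]. Walking i downward from
+     * MAX_SHORT_INDEX - 1 = Short.MAX_VALUE + NUM_SHORT_VALUES makes
+     * i & 0xFFFF visit 32767, 32766, ..., 0, then 65535 (-1) down to
+     * 32768 (-32768), i.e. all short values in descending signed
+     * order, while the array is filled from the top downward.
+     */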
+
+// [float]
+
+    /**
+     * Sorts the specified range of the array using parallel merge
+     * sort and/or Dual-Pivot Quicksort.
+     *
+     * To balance the faster splitting and parallelism of merge sort
+     * with the faster element partitioning of Quicksort, ranges are
+     * subdivided into tiers such that, if there is enough parallelism,
+     * the four-way parallel merge is started, still ensuring enough
+     * parallelism to process the partitions.
+     *
+     * @param a the array to be sorted
+     * @param parallelism the parallelism level
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(float[] a, int parallelism, int low, int high) {
+        /*
+         * Phase 1. Count the negative zeros -0.0f,
+         * turn them into positive zeros, and move all NaNs
+         * to the end of the array.
+         */
+        int numNegativeZero = 0;
+
+        for (int k = high; k > low; ) {
+            float ak = a[--k];
+
+            if (ak == 0.0f && Float.floatToRawIntBits(ak) < 0) { // ak is -0.0f
+                numNegativeZero += 1;
+                a[k] = 0.0f;
+            } else if (ak != ak) { // ak is NaN
+                a[k] = a[--high];
+                a[high] = ak;
             }
         }
 
         /*
-         * Phase 2: Sort everything except NaNs (which are already in place).
+         * Phase 2. Sort everything except NaNs,
+         * which are already in place.
          */
-        doSort(a, left, right, work, workBase, workLen);
-
-        /*
-         * Phase 3: Place negative zeros before positive zeros.
-         */
-        int hi = right;
+        int size = high - low;
+
+        if (parallelism > 1 && size > MIN_PARALLEL_SORT_SIZE) {
+            int depth = getDepth(parallelism, size >> 12);
+            float[] b = depth == 0 ? null : new float[size];
+            new Sorter(null, a, b, low, size, low, depth).invoke();
+        } else {
+            sort(null, a, 0, low, high);
+        }
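+
+        /*
+         * Sketch of the decision above: the merge depth is derived
+         * from the parallelism level and the range size scaled down
+         * by 4096. A depth of zero means no merge tiers, so no
+         * scratch buffer is allocated; otherwise a full-size buffer
+         * backs the four-way parallel merges of the forked Sorter
+         * tasks.
+         */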
 
         /*
-         * Find the first zero, or first positive, or last negative element.
+         * Phase 3. Turn positive zero 0.0f
+         * back into negative zero -0.0f. The counter below
+         * is pre-incremented, so a value of 1 after the
+         * increment means that no -0.0f was found.
          */
-        while (left < hi) {
-            int middle = (left + hi) >>> 1;
-            float middleValue = a[middle];
-
-            if (middleValue < 0.0f) {
-                left = middle + 1;
+        if (++numNegativeZero == 1) {
+            return;
+        }
+
+        /*
+         * Find the position one less than
+         * the index of the first zero.
+         */
+        while (low <= high) {
+            int middle = (low + high) >>> 1;
+
+            if (a[middle] < 0) {
+                low = middle + 1;
             } else {
-                hi = middle;
+                high = middle - 1;
             }
         }
 
         /*
-         * Skip the last negative value (if any) or all leading negative zeros.
+         * Replace the required number of 0.0f values with -0.0f.
          */
-        while (left <= right && Float.floatToRawIntBits(a[left]) < 0) {
-            ++left;
+        while (--numNegativeZero > 0) {
+            a[++high] = -0.0f;
         }
-
-        /*
-         * Move negative zeros to the beginning of the sub-range.
-         *
-         * Partitioning:
-         *
-         * +----------------------------------------------------+
-         * |   < 0.0   |   -0.0   |   0.0   |   ?  ( >= 0.0 )   |
-         * +----------------------------------------------------+
-         *              ^          ^         ^
-         *              |          |         |
-         *             left        p         k
-         *
-         * Invariants:
-         *
-         *   all in (*,  left)  <  0.0
-         *   all in [left,  p) == -0.0
-         *   all in [p,     k) ==  0.0
-         *   all in [k, right] >=  0.0
-         *
-         * Pointer k is the first index of ?-part.
-         */
-        for (int k = left, p = left - 1; ++k <= right; ) {
-            float ak = a[k];
-            if (ak != 0.0f) {
-                break;
+    }
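+
+    /*
+     * Worked example of the three phases (input chosen for
+     * illustration): a = { 0.0f, NaN, -0.0f, -1.0f }. Phase 1 counts
+     * one -0.0f and leaves { 0.0f, -1.0f, 0.0f, NaN }, excluding the
+     * NaN from the sort range. Phase 2 sorts the prefix to
+     * { -1.0f, 0.0f, 0.0f, NaN }. Phase 3 locates the first zero by
+     * binary search and rewrites one zero, giving the final order
+     * { -1.0f, -0.0f, 0.0f, NaN }.
+     */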
+
+    /**
+     * Sorts the specified array using the Dual-Pivot Quicksort and/or
+     * other sorts in special cases, possibly with parallel partitions.
+     *
+     * @param sorter parallel context
+     * @param a the array to be sorted
+     * @param bits the combination of recursion depth and bit flag, where
+     *        the lowest bit "0" indicates that the array is the leftmost part
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(Sorter sorter, float[] a, int bits, int low, int high) {
+        while (true) {
+            int end = high - 1, size = high - low;
+
+            /*
+             * Run mixed insertion sort on small non-leftmost parts.
+             */
+            if (size < MAX_MIXED_INSERTION_SORT_SIZE + bits && (bits & 1) > 0) {
+                mixedInsertionSort(a, low, high - 3 * ((size >> 5) << 3), high);
+                return;
+            }
+
+            /*
+             * Invoke insertion sort on small leftmost part.
+             */
+            if (size < MAX_INSERTION_SORT_SIZE) {
+                insertionSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Check if the whole array or large non-leftmost
+             * parts are nearly sorted and then merge runs.
+             */
+            if ((bits == 0 || size > MIN_TRY_MERGE_SIZE && (bits & 1) > 0)
+                    && tryMergeRuns(sorter, a, low, size)) {
+                return;
+            }
+
+            /*
+             * Switch to heap sort if execution
+             * time is becoming quadratic.
+             */
+            if ((bits += DELTA) > MAX_RECURSION_DEPTH) {
+                heapSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Use an inexpensive approximation of the golden ratio
+             * to select five sample elements and determine pivots.
+             */
+            int step = (size >> 3) * 3 + 3;
+
+            /*
+             * Five elements around (and including) the central element
+             * will be used for pivot selection as described below. The
+             * unequal spacing of these elements was empirically
+             * determined to work well on a wide variety of inputs.
+             */
+            int e1 = low + step;
+            int e5 = end - step;
+            int e3 = (e1 + e5) >>> 1;
+            int e2 = (e1 + e3) >>> 1;
+            int e4 = (e3 + e5) >>> 1;
+            float a3 = a[e3];
+
+            /*
+             * Sort these elements in place by the combination
+             * of 4-element sorting network and insertion sort.
+             *
+             *    5 ------o-----------o------------
+             *            |           |
+             *    4 ------|-----o-----o-----o------
+             *            |     |           |
+             *    2 ------o-----|-----o-----o------
+             *                  |     |
+             *    1 ------------o-----o------------
+             */
+            if (a[e5] < a[e2]) { float t = a[e5]; a[e5] = a[e2]; a[e2] = t; }
+            if (a[e4] < a[e1]) { float t = a[e4]; a[e4] = a[e1]; a[e1] = t; }
+            if (a[e5] < a[e4]) { float t = a[e5]; a[e5] = a[e4]; a[e4] = t; }
+            if (a[e2] < a[e1]) { float t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
+            if (a[e4] < a[e2]) { float t = a[e4]; a[e4] = a[e2]; a[e2] = t; }
+
+            if (a3 < a[e2]) {
+                if (a3 < a[e1]) {
+                    a[e3] = a[e2]; a[e2] = a[e1]; a[e1] = a3;
+                } else {
+                    a[e3] = a[e2]; a[e2] = a3;
+                }
+            } else if (a3 > a[e4]) {
+                if (a3 > a[e5]) {
+                    a[e3] = a[e4]; a[e4] = a[e5]; a[e5] = a3;
+                } else {
+                    a[e3] = a[e4]; a[e4] = a3;
+                }
             }
-            if (Float.floatToRawIntBits(ak) < 0) { // ak is -0.0f
-                a[k] = 0.0f;
-                a[++p] = -0.0f;
+
+            // Pointers
+            int lower = low; // The index of the last element of the left part
+            int upper = end; // The index of the first element of the right part
+
+            /*
+             * Partitioning with two pivots in the case of distinct elements.
+             */
+            if (a[e1] < a[e2] && a[e2] < a[e3] && a[e3] < a[e4] && a[e4] < a[e5]) {
+
+                /*
+                 * Use the first and fifth of the five sorted elements as
+                 * the pivots. These values are inexpensive approximations
+                 * of the terciles. Note that pivot1 < pivot2.
+                 */
+                float pivot1 = a[e1];
+                float pivot2 = a[e5];
+
+                /*
+                 * The first and the last elements to be sorted are moved
+                 * to the locations formerly occupied by the pivots. When
+                 * partitioning is completed, the pivots are swapped back
+                 * into their final positions, and excluded from
+                 * subsequent sorting.
+                 */
+                a[e1] = a[lower];
+                a[e5] = a[upper];
+
+                /*
+                 * Skip elements that are less than or greater than the pivots.
+                 */
+                while (a[++lower] < pivot1);
+                while (a[--upper] > pivot2);
+
+                /*
+                 * Backward 3-interval partitioning
+                 *
+                 *   left part                 central part          right part
+                 * +------------------------------------------------------------+
+                 * |  < pivot1  |   ?   |  pivot1 <= && <= pivot2  |  > pivot2  |
+                 * +------------------------------------------------------------+
+                 *             ^       ^                            ^
+                 *             |       |                            |
+                 *           lower     k                          upper
+                 *
+                 * Invariants:
+                 *
+                 *              all in (low, lower] < pivot1
+                 *    pivot1 <= all in (k, upper)  <= pivot2
+                 *              all in [upper, end) > pivot2
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int unused = --lower, k = ++upper; --k > lower; ) {
+                    float ak = a[k];
+
+                    if (ak < pivot1) { // Move a[k] to the left side
+                        while (lower < k) {
+                            if (a[++lower] >= pivot1) {
+                                if (a[lower] > pivot2) {
+                                    a[k] = a[--upper];
+                                    a[upper] = a[lower];
+                                } else {
+                                    a[k] = a[lower];
+                                }
+                                a[lower] = ak;
+                                break;
+                            }
+                        }
+                    } else if (ak > pivot2) { // Move a[k] to the right side
+                        a[k] = a[--upper];
+                        a[upper] = ak;
+                    }
+                }
+
+                /*
+                 * Swap the pivots into their final positions.
+                 */
+                a[low] = a[lower]; a[lower] = pivot1;
+                a[end] = a[upper]; a[upper] = pivot2;
+
+                /*
+                 * Sort non-left parts recursively (possibly in parallel),
+                 * excluding known pivots.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, lower + 1, upper);
+                    sorter.forkSorter(bits | 1, upper + 1, high);
+                } else {
+                    sort(sorter, a, bits | 1, lower + 1, upper);
+                    sort(sorter, a, bits | 1, upper + 1, high);
+                }
+
+            } else { // Use a single pivot in the case of many equal elements
+
+                /*
+                 * Use the third of the five sorted elements as the pivot.
+                 * This value is an inexpensive approximation of the median.
+                 */
+                float pivot = a[e3];
+
+                /*
+                 * The first element to be sorted is moved to the
+                 * location formerly occupied by the pivot. After
+                 * completion of partitioning the pivot is swapped
+                 * back into its final position, and excluded
+                 * from subsequent sorting.
+                 */
+                a[e3] = a[lower];
+
+                /*
+                 * Traditional 3-way (Dutch National Flag) partitioning
+                 *
+                 *   left part                 central part    right part
+                 * +------------------------------------------------------+
+                 * |   < pivot   |     ?     |   == pivot   |   > pivot   |
+                 * +------------------------------------------------------+
+                 *              ^           ^                ^
+                 *              |           |                |
+                 *            lower         k              upper
+                 *
+                 * Invariants:
+                 *
+                 *   all in (low, lower] < pivot
+                 *   all in (k, upper)  == pivot
+                 *   all in [upper, end] > pivot
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
+                for (int k = ++upper; --k > lower; ) {
+                    float ak = a[k];
+
+                    if (ak != pivot) {
+                        a[k] = pivot;
+
+                        if (ak < pivot) { // Move a[k] to the left side
+                            while (a[++lower] < pivot);
+
+                            if (a[lower] > pivot) {
+                                a[--upper] = a[lower];
+                            }
+                            a[lower] = ak;
+                        } else { // ak > pivot - Move a[k] to the right side
+                            a[--upper] = ak;
+                        }
+                    }
+                }
+
+                /*
+                 * Swap the pivot into its final position.
+                 */
+                a[low] = a[lower]; a[lower] = pivot;
+
+                /*
+                 * Sort the right part (possibly in parallel), excluding
+                 * the known pivot. All elements from the central part are
+                 * equal and therefore already sorted.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, upper, high);
+                } else {
+                    sort(sorter, a, bits | 1, upper, high);
+                }
+            }
+            high = lower; // Iterate along the left part
+        }
+    }
+
+    /**
+     * Sorts the specified range of the array using mixed insertion sort.
+     *
+     * Mixed insertion sort is a combination of simple insertion sort,
+     * pin insertion sort and pair insertion sort.
+     *
+     * In the context of Dual-Pivot Quicksort, the pivot element
+     * from the left part plays the role of a sentinel, because it
+     * is less than any element from the given part. Therefore, the
+     * expensive check of the left range can be skipped on each
+     * iteration unless this is the leftmost call.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param end the index of the last element for simple insertion sort
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void mixedInsertionSort(float[] a, int low, int end, int high) {
+        if (end == high) {
+
+            /*
+             * Invoke simple insertion sort on tiny array.
+             */
+            for (int i; ++low < end; ) {
+                float ai = a[i = low];
+
+                while (ai < a[--i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        } else {
+
+            /*
+             * Start with pin insertion sort on small part.
+             *
+             * Pin insertion sort is an extension of simple insertion
+             * sort. The main idea of this sort is to put elements
+             * larger than an element called the pin to the end of the
+             * array (the proper area for such elements). This avoids
+             * expensive movements of these elements through the whole array.
+             */
+            float pin = a[end];
+
+            for (int i, p = high; ++low < end; ) {
+                float ai = a[i = low];
+
+                if (ai < a[i - 1]) { // Small element
+
+                    /*
+                     * Insert small element into sorted part.
+                     */
+                    a[i] = a[--i];
+
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = ai;
+
+                } else if (p > i && ai > pin) { // Large element
+
+                    /*
+                     * Find an element that is not greater than the pin.
+                     */
+                    while (a[--p] > pin);
+
+                    /*
+                     * Swap it with large element.
+                     */
+                    if (p > i) {
+                        ai = a[p];
+                        a[p] = a[i];
+                    }
+
+                    /*
+                     * Insert small element into sorted part.
+                     */
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = ai;
+                }
+            }
+
+            /*
+             * Continue with pair insertion sort on the remaining part.
+             */
+            for (int i; low < high; ++low) {
+                float a1 = a[i = low], a2 = a[++low];
+
+                /*
+                 * Insert two elements per iteration: first insert the
+                 * larger element, then insert the smaller element starting
+                 * from the position where the larger element was inserted.
+                 */
+                if (a1 > a2) {
+
+                    while (a1 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a1;
+
+                    while (a2 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a2;
+
+                } else if (a1 < a[i - 1]) {
+
+                    while (a2 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a2;
+
+                    while (a1 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a1;
+                }
+            }
+        }
+    }
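+
+    /*
+     * Roughly, the pin a[end] acts as a threshold: an element larger
+     * than the pin is swapped directly toward the end of the range,
+     * the proper area for such elements, instead of being dragged one
+     * slot at a time through the sorted prefix, and the smaller
+     * element received in exchange is inserted normally.
+     */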
+
+    /**
+     * Sorts the specified range of the array using insertion sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void insertionSort(float[] a, int low, int high) {
+        for (int i, k = low; ++k < high; ) {
+            float ai = a[i = k];
+
+            if (ai < a[i - 1]) {
+                while (--i >= low && ai < a[i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
             }
         }
     }
 
     /**
-     * Sorts the specified range of the array.
+     * Sorts the specified range of the array using heap sort.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void doSort(float[] a, int left, int right,
-                               float[] work, int workBase, int workLen) {
-        // Use Quicksort on small arrays
-        if (right - left < QUICKSORT_THRESHOLD) {
-            sort(a, left, right, true);
-            return;
-        }
-
-        /*
-         * Index run[i] is the start of i-th run
-         * (ascending or descending sequence).
-         */
-        int[] run = new int[MAX_RUN_COUNT + 1];
-        int count = 0; run[0] = left;
-
-        // Check if the array is nearly sorted
-        for (int k = left; k < right; run[count] = k) {
-            // Equal items in the beginning of the sequence
-            while (k < right && a[k] == a[k + 1])
-                k++;
-            if (k == right) break;  // Sequence finishes with equal items
-            if (a[k] < a[k + 1]) { // ascending
-                while (++k <= right && a[k - 1] <= a[k]);
-            } else if (a[k] > a[k + 1]) { // descending
-                while (++k <= right && a[k - 1] >= a[k]);
-                // Transform into an ascending sequence
-                for (int lo = run[count] - 1, hi = k; ++lo < --hi; ) {
-                    float t = a[lo]; a[lo] = a[hi]; a[hi] = t;
-                }
-            }
-
-            // Merge a transformed descending sequence followed by an
-            // ascending sequence
-            if (run[count] > left && a[run[count]] >= a[run[count] - 1]) {
-                count--;
-            }
-
-            /*
-             * The array is not highly structured,
-             * use Quicksort instead of merge sort.
-             */
-            if (++count == MAX_RUN_COUNT) {
-                sort(a, left, right, true);
-                return;
-            }
+    private static void heapSort(float[] a, int low, int high) {
+        for (int k = (low + high) >>> 1; k > low; ) {
+            pushDown(a, --k, a[k], low, high);
         }
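+
+        // Note (editorial): the loop above heapifies bottom-up from
+        // the last internal node; the loop below repeatedly moves
+        // the current maximum a[low] into the shrinking tail.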
-
-        // These invariants should hold true:
-        //    run[0] = 0
-        //    run[<last>] = right + 1; (terminator)
-
-        if (count == 0) {
-            // A single equal run
-            return;
-        } else if (count == 1 && run[count] > right) {
-            // Either a single ascending or a transformed descending run.
-            // Always check that a final run is a proper terminator, otherwise
-            // we have an unterminated trailing run, to handle downstream.
-            return;
-        }
-        right++;
-        if (run[count] < right) {
-            // Corner case: the final run is not a terminator. This may happen
-            // if a final run is an equals run, or there is a single-element run
-            // at the end. Fix up by adding a proper terminator at the end.
-            // Note that we terminate with (right + 1), incremented earlier.
-            run[++count] = right;
-        }
-
-        // Determine alternation base for merge
-        byte odd = 0;
-        for (int n = 1; (n <<= 1) < count; odd ^= 1);
-
-        // Use or create temporary array b for merging
-        float[] b;                 // temp array; alternates with a
-        int ao, bo;              // array offsets from 'left'
-        int blen = right - left; // space needed for b
-        if (work == null || workLen < blen || workBase + blen > work.length) {
-            work = new float[blen];
-            workBase = 0;
-        }
-        if (odd == 0) {
-            System.arraycopy(a, left, work, workBase, blen);
-            b = a;
-            bo = 0;
-            a = work;
-            ao = workBase - left;
-        } else {
-            b = work;
-            ao = 0;
-            bo = workBase - left;
-        }
-
-        // Merging
-        for (int last; count > 1; count = last) {
-            for (int k = (last = 0) + 2; k <= count; k += 2) {
-                int hi = run[k], mi = run[k - 1];
-                for (int i = run[k - 2], p = i, q = mi; i < hi; ++i) {
-                    if (q >= hi || p < mi && a[p + ao] <= a[q + ao]) {
-                        b[i + bo] = a[p++ + ao];
-                    } else {
-                        b[i + bo] = a[q++ + ao];
-                    }
-                }
-                run[++last] = hi;
-            }
-            if ((count & 1) != 0) {
-                for (int i = right, lo = run[count - 1]; --i >= lo;
-                    b[i + bo] = a[i + ao]
-                );
-                run[++last] = right;
-            }
-            float[] t = a; a = b; b = t;
-            int o = ao; ao = bo; bo = o;
+        while (--high > low) {
+            float max = a[low];
+            pushDown(a, low, a[high], low, high);
+            a[high] = max;
         }
     }
 
     /**
-     * Sorts the specified range of the array by Dual-Pivot Quicksort.
+     * Pushes the specified element down during heap sort.
      *
-     * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param leftmost indicates if this part is the leftmost in the range
+     * @param a the given array
+     * @param p the start index
+     * @param value the given element
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void sort(float[] a, int left, int right, boolean leftmost) {
-        int length = right - left + 1;
-
-        // Use insertion sort on tiny arrays
-        if (length < INSERTION_SORT_THRESHOLD) {
-            if (leftmost) {
-                /*
-                 * Traditional (without sentinel) insertion sort,
-                 * optimized for server VM, is used in case of
-                 * the leftmost part.
-                 */
-                for (int i = left, j = i; i < right; j = ++i) {
-                    float ai = a[i + 1];
-                    while (ai < a[j]) {
-                        a[j + 1] = a[j];
-                        if (j-- == left) {
-                            break;
-                        }
-                    }
-                    a[j + 1] = ai;
-                }
-            } else {
-                /*
-                 * Skip the longest ascending sequence.
-                 */
-                do {
-                    if (left >= right) {
-                        return;
-                    }
-                } while (a[++left] >= a[left - 1]);
-
-                /*
-                 * Every element from adjoining part plays the role
-                 * of sentinel, therefore this allows us to avoid the
-                 * left range check on each iteration. Moreover, we use
-                 * the more optimized algorithm, so called pair insertion
-                 * sort, which is faster (in the context of Quicksort)
-                 * than traditional implementation of insertion sort.
-                 */
-                for (int k = left; ++left <= right; k = ++left) {
-                    float a1 = a[k], a2 = a[left];
-
-                    if (a1 < a2) {
-                        a2 = a1; a1 = a[left];
-                    }
-                    while (a1 < a[--k]) {
-                        a[k + 2] = a[k];
-                    }
-                    a[++k + 1] = a1;
-
-                    while (a2 < a[--k]) {
-                        a[k + 1] = a[k];
-                    }
-                    a[k + 1] = a2;
-                }
-                float last = a[right];
-
-                while (last < a[--right]) {
-                    a[right + 1] = a[right];
-                }
-                a[right + 1] = last;
+    private static void pushDown(float[] a, int p, float value, int low, int high) {
+        for (int k ;; a[p] = a[p = k]) {
+            k = (p << 1) - low + 2; // Index of the right child
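+
+            /*
+             * Derivation (editorial): the heap is rooted at 'low',
+             * so node p has heap position q = p - low and children
+             * at heap positions 2q + 1 and 2q + 2, that is at array
+             * indexes 2p - low + 1 (left) and 2p - low + 2 (right).
+             */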
+
+            if (k > high) {
+                break;
+            }
+            if (k == high || a[k] < a[k - 1]) {
+                --k;
+            }
+            if (a[k] <= value) {
+                break;
             }
-            return;
         }
-
-        // Inexpensive approximation of length / 7
-        int seventh = (length >> 3) + (length >> 6) + 1;
+        a[p] = value;
+    }
+
+    /**
+     * Tries to sort the specified range of the array.
+     *
+     * @param sorter parallel context
+     * @param a the array to be sorted
+     * @param low the index of the first element to be sorted
+     * @param size the array size
+     * @return true if finally sorted, false otherwise
+     */
+    private static boolean tryMergeRuns(Sorter sorter, float[] a, int low, int size) {
 
         /*
-         * Sort five evenly spaced elements around (and including) the
-         * center element in the range. These elements will be used for
-         * pivot selection as described below. The choice for spacing
-         * these elements was empirically determined to work well on
-         * a wide variety of inputs.
+         * The run array is constructed only if initial runs are
+         * long enough to continue; run[i] then holds the start index
+         * of the i-th sequence of elements in non-descending order.
          */
-        int e3 = (left + right) >>> 1; // The midpoint
-        int e2 = e3 - seventh;
-        int e1 = e2 - seventh;
-        int e4 = e3 + seventh;
-        int e5 = e4 + seventh;
-
-        // Sort these elements using insertion sort
-        if (a[e2] < a[e1]) { float t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
-
-        if (a[e3] < a[e2]) { float t = a[e3]; a[e3] = a[e2]; a[e2] = t;
-            if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-        }
-        if (a[e4] < a[e3]) { float t = a[e4]; a[e4] = a[e3]; a[e3] = t;
-            if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-            }
-        }
-        if (a[e5] < a[e4]) { float t = a[e5]; a[e5] = a[e4]; a[e4] = t;
-            if (t < a[e3]) { a[e4] = a[e3]; a[e3] = t;
-                if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                    if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-                }
-            }
-        }
-
-        // Pointers
-        int less  = left;  // The index of the first element of center part
-        int great = right; // The index before the first element of right part
-
-        if (a[e1] != a[e2] && a[e2] != a[e3] && a[e3] != a[e4] && a[e4] != a[e5]) {
-            /*
-             * Use the second and fourth of the five sorted elements as pivots.
-             * These values are inexpensive approximations of the first and
-             * second terciles of the array. Note that pivot1 <= pivot2.
-             */
-            float pivot1 = a[e2];
-            float pivot2 = a[e4];
-
-            /*
-             * The first and the last elements to be sorted are moved to the
-             * locations formerly occupied by the pivots. When partitioning
-             * is complete, the pivots are swapped back into their final
-             * positions, and excluded from subsequent sorting.
-             */
-            a[e2] = a[left];
-            a[e4] = a[right];
-
-            /*
-             * Skip elements, which are less or greater than pivot values.
-             */
-            while (a[++less] < pivot1);
-            while (a[--great] > pivot2);
+        int[] run = null;
+        int high = low + size;
+        int count = 1, last = low;
+
+        /*
+         * Identify all possible runs.
+         */
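+
+        /*
+         * Illustration (editorial): in [1, 4, 7, 5, 3, 8, 8] the first
+         * run [1, 4, 7] ends after three elements; a first run shorter
+         * than MIN_FIRST_RUN_SIZE makes the check below return false
+         * at once, leaving such inputs to Quicksort. A long descending
+         * run (e.g. 9, 8, 7, ...) would be reversed in place first.
+         */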
+        for (int k = low + 1; k < high; ) {
 
             /*
-             * Partitioning:
-             *
-             *   left part           center part                   right part
-             * +--------------------------------------------------------------+
-             * |  < pivot1  |  pivot1 <= && <= pivot2  |    ?    |  > pivot2  |
-             * +--------------------------------------------------------------+
-             *               ^                          ^       ^
-             *               |                          |       |
-             *              less                        k     great
-             *
-             * Invariants:
-             *
-             *              all in (left, less)   < pivot1
-             *    pivot1 <= all in [less, k)     <= pivot2
-             *              all in (great, right) > pivot2
-             *
-             * Pointer k is the first index of ?-part.
+             * Find the end index of the current run.
              */
-            outer:
-            for (int k = less - 1; ++k <= great; ) {
-                float ak = a[k];
-                if (ak < pivot1) { // Move a[k] to left part
-                    a[k] = a[less];
-                    /*
-                     * Here and below we use "a[i] = b; i++;" instead
-                     * of "a[i++] = b;" due to performance issue.
-                     */
-                    a[less] = ak;
-                    ++less;
-                } else if (ak > pivot2) { // Move a[k] to right part
-                    while (a[great] > pivot2) {
-                        if (great-- == k) {
-                            break outer;
-                        }
-                    }
-                    if (a[great] < pivot1) { // a[great] <= pivot2
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // pivot1 <= a[great] <= pivot2
-                        a[k] = a[great];
-                    }
-                    /*
-                     * Here and below we use "a[i] = b; i--;" instead
-                     * of "a[i--] = b;" due to performance issue.
-                     */
-                    a[great] = ak;
-                    --great;
-                }
-            }
-
-            // Swap pivots into their final positions
-            a[left]  = a[less  - 1]; a[less  - 1] = pivot1;
-            a[right] = a[great + 1]; a[great + 1] = pivot2;
-
-            // Sort left and right parts recursively, excluding known pivots
-            sort(a, left, less - 2, leftmost);
-            sort(a, great + 2, right, false);
-
-            /*
-             * If center part is too large (comprises > 4/7 of the array),
-             * swap internal pivot values to ends.
-             */
-            if (less < e1 && e5 < great) {
-                /*
-                 * Skip elements, which are equal to pivot values.
-                 */
-                while (a[less] == pivot1) {
-                    ++less;
-                }
-
-                while (a[great] == pivot2) {
-                    --great;
+            if (a[k - 1] < a[k]) {
+
+                // Identify ascending sequence
+                while (++k < high && a[k - 1] <= a[k]);
+
+            } else if (a[k - 1] > a[k]) {
+
+                // Identify descending sequence
+                while (++k < high && a[k - 1] >= a[k]);
+
+                // Reverse into ascending order
+                for (int i = last - 1, j = k; ++i < --j && a[i] > a[j]; ) {
+                    float ai = a[i]; a[i] = a[j]; a[j] = ai;
                 }
-
-                /*
-                 * Partitioning:
-                 *
-                 *   left part         center part                  right part
-                 * +----------------------------------------------------------+
-                 * | == pivot1 |  pivot1 < && < pivot2  |    ?    | == pivot2 |
-                 * +----------------------------------------------------------+
-                 *              ^                        ^       ^
-                 *              |                        |       |
-                 *             less                      k     great
-                 *
-                 * Invariants:
-                 *
-                 *              all in (*,  less) == pivot1
-                 *     pivot1 < all in [less,  k)  < pivot2
-                 *              all in (great, *) == pivot2
-                 *
-                 * Pointer k is the first index of ?-part.
-                 */
-                outer:
-                for (int k = less - 1; ++k <= great; ) {
-                    float ak = a[k];
-                    if (ak == pivot1) { // Move a[k] to left part
-                        a[k] = a[less];
-                        a[less] = ak;
-                        ++less;
-                    } else if (ak == pivot2) { // Move a[k] to right part
-                        while (a[great] == pivot2) {
-                            if (great-- == k) {
-                                break outer;
-                            }
-                        }
-                        if (a[great] == pivot1) { // a[great] < pivot2
-                            a[k] = a[less];
-                            /*
-                             * Even though a[great] equals to pivot1, the
-                             * assignment a[less] = pivot1 may be incorrect,
-                             * if a[great] and pivot1 are floating-point zeros
-                             * of different signs. Therefore in float and
-                             * double sorting methods we have to use more
-                             * accurate assignment a[less] = a[great].
-                             */
-                            a[less] = a[great];
-                            ++less;
-                        } else { // pivot1 < a[great] < pivot2
-                            a[k] = a[great];
-                        }
-                        a[great] = ak;
-                        --great;
-                    }
-                }
-            }
-
-            // Sort center part recursively
-            sort(a, less, great, false);
-
-        } else { // Partitioning with one pivot
-            /*
-             * Use the third of the five sorted elements as pivot.
-             * This value is inexpensive approximation of the median.
-             */
-            float pivot = a[e3];
-
-            /*
-             * Partitioning degenerates to the traditional 3-way
-             * (or "Dutch National Flag") schema:
-             *
-             *   left part    center part              right part
-             * +-------------------------------------------------+
-             * |  < pivot  |   == pivot   |     ?    |  > pivot  |
-             * +-------------------------------------------------+
-             *              ^              ^        ^
-             *              |              |        |
-             *             less            k      great
-             *
-             * Invariants:
-             *
-             *   all in (left, less)   < pivot
-             *   all in [less, k)     == pivot
-             *   all in (great, right) > pivot
-             *
-             * Pointer k is the first index of ?-part.
-             */
-            for (int k = less; k <= great; ++k) {
-                if (a[k] == pivot) {
+            } else { // Identify constant sequence
+                for (float ak = a[k]; ++k < high && ak == a[k]; );
+
+                if (k < high) {
                     continue;
                 }
-                float ak = a[k];
-                if (ak < pivot) { // Move a[k] to left part
-                    a[k] = a[less];
-                    a[less] = ak;
-                    ++less;
-                } else { // a[k] > pivot - Move a[k] to right part
-                    while (a[great] > pivot) {
-                        --great;
-                    }
-                    if (a[great] < pivot) { // a[great] <= pivot
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // a[great] == pivot
-                        /*
-                         * Even though a[great] equals to pivot, the
-                         * assignment a[k] = pivot may be incorrect,
-                         * if a[great] and pivot are floating-point
-                         * zeros of different signs. Therefore in float
-                         * and double sorting methods we have to use
-                         * more accurate assignment a[k] = a[great].
-                         */
-                        a[k] = a[great];
-                    }
-                    a[great] = ak;
-                    --great;
-                }
             }
 
             /*
-             * Sort left and right parts recursively.
-             * All elements from center part are equal
-             * and, therefore, already sorted.
+             * Check special cases.
              */
-            sort(a, left, less - 1, leftmost);
-            sort(a, great + 1, right, false);
+            if (run == null) {
+                if (k == high) {
+
+                    /*
+                     * The array is a monotonic sequence
+                     * and therefore already sorted.
+                     */
+                    return true;
+                }
+
+                if (k - low < MIN_FIRST_RUN_SIZE) {
+
+                    /*
+                     * The first run is too small
+                     * to proceed with scanning.
+                     */
+                    return false;
+                }
+
+                run = new int[((size >> 10) | 0x7F) & 0x3FF];
+                run[0] = low;
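+
+                // Note (editorial): ((size >> 10) | 0x7F) & 0x3FF keeps
+                // the initial capacity between 127 and 1023 run indexes;
+                // it is doubled on demand further below.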
+
+            } else if (a[last - 1] > a[last]) {
+
+                if (count > (k - low) >> MIN_FIRST_RUNS_FACTOR) {
+
+                    /*
+                     * The first runs are not long
+                     * enough to continue scanning.
+                     */
+                    return false;
+                }
+
+                if (++count == MAX_RUN_CAPACITY) {
+
+                    /*
+                     * The array is not highly structured.
+                     */
+                    return false;
+                }
+
+                if (count == run.length) {
+
+                    /*
+                     * Increase capacity of index array.
+                     */
+                    run = Arrays.copyOf(run, count << 1);
+                }
+            }
+            run[count] = (last = k);
         }
+
+        /*
+         * Merge runs of highly structured array.
+         */
+        if (count > 1) {
+            float[] b; int offset = low;
+
+            if (sorter == null || (b = (float[]) sorter.b) == null) {
+                b = new float[size];
+            } else {
+                offset = sorter.offset;
+            }
+            mergeRuns(a, b, offset, 1, sorter != null, run, 0, count);
+        }
+        return true;
+    }
+
+    /**
+     * Merges the specified runs.
+     *
+     * @param a the source array
+     * @param b the temporary buffer used in merging
+     * @param offset the start index in the source, inclusive
+     * @param aim specifies merging: to source ( > 0), buffer ( < 0) or any ( == 0)
+     * @param parallel indicates whether merging is performed in parallel
+     * @param run the start indexes of the runs, inclusive
+     * @param lo the start index of the first run, inclusive
+     * @param hi the start index of the last run, inclusive
+     * @return the destination where runs are merged
+     */
+    private static float[] mergeRuns(float[] a, float[] b, int offset,
+            int aim, boolean parallel, int[] run, int lo, int hi) {
+
+        if (hi - lo == 1) {
+            if (aim >= 0) {
+                return a;
+            }
+            for (int i = run[hi], j = i - offset, low = run[lo]; i > low;
+                b[--j] = a[--i]
+            );
+            return b;
+        }
+
+        /*
+         * Split into approximately equal parts.
+         */
+        int mi = lo, rmi = (run[lo] + run[hi]) >>> 1;
+        while (run[++mi + 1] <= rmi);
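+
+        /*
+         * Note (editorial): the loop above advances mi while the next
+         * run still starts at or before the midpoint rmi, so the split
+         * balances element counts rather than run counts, keeping the
+         * two recursive merges of roughly equal size.
+         */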
+
+        /*
+         * Merge the left and right parts.
+         */
+        float[] a1, a2;
+
+        if (parallel && hi - lo > MIN_RUN_COUNT) {
+            RunMerger merger = new RunMerger(a, b, offset, 0, run, mi, hi).forkMe();
+            a1 = mergeRuns(a, b, offset, -aim, true, run, lo, mi);
+            a2 = (float[]) merger.getDestination();
+        } else {
+            a1 = mergeRuns(a, b, offset, -aim, false, run, lo, mi);
+            a2 = mergeRuns(a, b, offset,    0, false, run, mi, hi);
+        }
+
+        float[] dst = a1 == a ? b : a;
+
+        int k   = a1 == a ? run[lo] - offset : run[lo];
+        int lo1 = a1 == b ? run[lo] - offset : run[lo];
+        int hi1 = a1 == b ? run[mi] - offset : run[mi];
+        int lo2 = a2 == b ? run[mi] - offset : run[mi];
+        int hi2 = a2 == b ? run[hi] - offset : run[hi];
+
+        if (parallel) {
+            new Merger(null, dst, k, a1, lo1, hi1, a2, lo2, hi2).invoke();
+        } else {
+            mergeParts(null, dst, k, a1, lo1, hi1, a2, lo2, hi2);
+        }
+        return dst;
     }
 
     /**
-     * Sorts the specified range of the array using the given
-     * workspace array slice if possible for merging
+     * Merges the sorted parts.
+     *
+     * @param merger parallel context
+     * @param dst the destination where parts are merged
+     * @param k the start index of the destination, inclusive
+     * @param a1 the first part
+     * @param lo1 the start index of the first part, inclusive
+     * @param hi1 the end index of the first part, exclusive
+     * @param a2 the second part
+     * @param lo2 the start index of the second part, inclusive
+     * @param hi2 the end index of the second part, exclusive
+     */
+    private static void mergeParts(Merger merger, float[] dst, int k,
+            float[] a1, int lo1, int hi1, float[] a2, int lo2, int hi2) {
+
+        if (merger != null && a1 == a2) {
+
+            while (true) {
+
+                /*
+                 * The first part must be larger.
+                 */
+                if (hi1 - lo1 < hi2 - lo2) {
+                    int lo = lo1; lo1 = lo2; lo2 = lo;
+                    int hi = hi1; hi1 = hi2; hi2 = hi;
+                }
+
+                /*
+                 * Small parts will be merged sequentially.
+                 */
+                if (hi1 - lo1 < MIN_PARALLEL_MERGE_PARTS_SIZE) {
+                    break;
+                }
+
+                /*
+                 * Find the median of the larger part.
+                 */
+                int mi1 = (lo1 + hi1) >>> 1;
+                float key = a1[mi1];
+                int mi2 = hi2;
+
+                /*
+                 * Partition the smaller part.
+                 */
+                for (int loo = lo2; loo < mi2; ) {
+                    int t = (loo + mi2) >>> 1;
+
+                    if (key > a2[t]) {
+                        loo = t + 1;
+                    } else {
+                        mi2 = t;
+                    }
+                }
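+
+                /*
+                 * Note (editorial): the binary search above leaves mi2
+                 * at the first index of the smaller part whose element
+                 * is >= key, so every element in the two left halves is
+                 * <= key and every element in the two right halves is
+                 * >= key; the halves can therefore be merged separately.
+                 */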
+
+                int d = mi2 - lo2 + mi1 - lo1;
+
+                /*
+                 * Merge the right sub-parts in parallel.
+                 */
+                merger.forkMerger(dst, k + d, a1, mi1, hi1, a2, mi2, hi2);
+
+                /*
+                 * Process the left sub-parts.
+                 */
+                hi1 = mi1;
+                hi2 = mi2;
+            }
+        }
+
+        /*
+         * Merge small parts sequentially.
+         */
+        while (lo1 < hi1 && lo2 < hi2) {
+            dst[k++] = a1[lo1] < a2[lo2] ? a1[lo1++] : a2[lo2++];
+        }
+        if (dst != a1 || k < lo1) {
+            while (lo1 < hi1) {
+                dst[k++] = a1[lo1++];
+            }
+        }
+        if (dst != a2 || k < lo2) {
+            while (lo2 < hi2) {
+                dst[k++] = a2[lo2++];
+            }
+        }
+    }
+
+// [double]
+
+    /**
+     * Sorts the specified range of the array using parallel merge
+     * sort and/or Dual-Pivot Quicksort.
+     *
+     * To balance the faster splitting and parallelism of merge sort
+     * with the faster element partitioning of Quicksort, ranges are
+     * subdivided in tiers such that, if there is enough parallelism,
+     * the four-way parallel merge is started, still ensuring enough
+     * parallelism to process the partitions.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param parallelism the parallelism level
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    static void sort(double[] a, int left, int right,
-                     double[] work, int workBase, int workLen) {
+    static void sort(double[] a, int parallelism, int low, int high) {
+        /*
+         * Phase 1. Count the number of negative zeros (-0.0d),
+         * turn them into positive zeros, and move all NaNs
+         * to the end of the array.
+         */
+        int numNegativeZero = 0;
+
+        for (int k = high; k > low; ) {
+            double ak = a[--k];
+
+            if (ak == 0.0d && Double.doubleToRawLongBits(ak) < 0) { // ak is -0.0d
+                numNegativeZero += 1;
+                a[k] = 0.0d;
+            } else if (ak != ak) { // ak is NaN
+                a[k] = a[--high];
+                a[high] = ak;
+            }
+        }
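+
+        /*
+         * Illustration (editorial): [-0.0d, NaN, 1.0d, -0.0d] becomes
+         * [0.0d, 0.0d, 1.0d, NaN] with numNegativeZero == 2 and high
+         * decreased past the NaN; the test ak != ak detects NaN
+         * without any library call.
+         */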
+
         /*
-         * Phase 1: Move NaNs to the end of the array.
+         * Phase 2. Sort everything except NaNs,
+         * which are already in place.
          */
-        while (left <= right && Double.isNaN(a[right])) {
-            --right;
+        int size = high - low;
+
+        if (parallelism > 1 && size > MIN_PARALLEL_SORT_SIZE) {
+            int depth = getDepth(parallelism, size >> 12);
+            double[] b = depth == 0 ? null : new double[size];
+            new Sorter(null, a, b, low, size, low, depth).invoke();
+        } else {
+            sort(null, a, 0, low, high);
         }
-        for (int k = right; --k >= left; ) {
-            double ak = a[k];
-            if (ak != ak) { // a[k] is NaN
-                a[k] = a[right];
-                a[right] = ak;
-                --right;
+
+        /*
+         * Phase 3. Turn the counted positive zeros 0.0d
+         * back into negative zeros -0.0d.
+         */
+        if (++numNegativeZero == 1) {
+            return;
+        }
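+
+        // Note (editorial): after the pre-increment, numNegativeZero is
+        // the count of -0.0d found in phase 1 plus one, so the early
+        // return above covers the common case of no negative zeros and
+        // the restore loop below runs exactly 'count' times.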
+
+        /*
+         * Find the position one less than
+         * the index of the first zero.
+         */
+        while (low <= high) {
+            int middle = (low + high) >>> 1;
+
+            if (a[middle] < 0) {
+                low = middle + 1;
+            } else {
+                high = middle - 1;
             }
         }
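+
+        // Note (editorial): on exit, high + 1 is the index of the first
+        // zero; all zeros are +0.0d at this point and, being sorted,
+        // sit directly after the negative elements, so -0.0d values are
+        // written back there below.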
 
         /*
-         * Phase 2: Sort everything except NaNs (which are already in place).
-         */
-        doSort(a, left, right, work, workBase, workLen);
-
-        /*
-         * Phase 3: Place negative zeros before positive zeros.
-         */
-        int hi = right;
-
-        /*
-         * Find the first zero, or first positive, or last negative element.
+         * Replace the required number of 0.0d by -0.0d.
          */
-        while (left < hi) {
-            int middle = (left + hi) >>> 1;
-            double middleValue = a[middle];
-
-            if (middleValue < 0.0d) {
-                left = middle + 1;
-            } else {
-                hi = middle;
-            }
-        }
-
-        /*
-         * Skip the last negative value (if any) or all leading negative zeros.
-         */
-        while (left <= right && Double.doubleToRawLongBits(a[left]) < 0) {
-            ++left;
+        while (--numNegativeZero > 0) {
+            a[++high] = -0.0d;
         }
-
-        /*
-         * Move negative zeros to the beginning of the sub-range.
-         *
-         * Partitioning:
-         *
-         * +----------------------------------------------------+
-         * |   < 0.0   |   -0.0   |   0.0   |   ?  ( >= 0.0 )   |
-         * +----------------------------------------------------+
-         *              ^          ^         ^
-         *              |          |         |
-         *             left        p         k
-         *
-         * Invariants:
-         *
-         *   all in (*,  left)  <  0.0
-         *   all in [left,  p) == -0.0
-         *   all in [p,     k) ==  0.0
-         *   all in [k, right] >=  0.0
-         *
-         * Pointer k is the first index of ?-part.
-         */
-        for (int k = left, p = left - 1; ++k <= right; ) {
-            double ak = a[k];
-            if (ak != 0.0d) {
-                break;
+    }
+
+    /**
+     * Sorts the specified array using the Dual-Pivot Quicksort and/or
+     * other sorts in special cases, possibly with parallel partitions.
+     *
+     * @param sorter parallel context
+     * @param a the array to be sorted
+     * @param bits the combination of recursion depth and bit flag, where
+     *        the right bit "0" indicates that array is the leftmost part
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    static void sort(Sorter sorter, double[] a, int bits, int low, int high) {
+        while (true) {
+            int end = high - 1, size = high - low;
+
+            /*
+             * Run mixed insertion sort on small non-leftmost parts.
+             */
+            if (size < MAX_MIXED_INSERTION_SORT_SIZE + bits && (bits & 1) > 0) {
+                mixedInsertionSort(a, low, high - 3 * ((size >> 5) << 3), high);
+                return;
+            }
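+
+            /*
+             * Note (editorial): 3 * ((size >> 5) << 3) is roughly 3/4
+             * of size, rounded down to a multiple of 24, so pin
+             * insertion sort covers about the first quarter of the
+             * range and pair insertion sort the remaining three
+             * quarters.
+             */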
+
+            /*
+             * Invoke insertion sort on small leftmost part.
+             */
+            if (size < MAX_INSERTION_SORT_SIZE) {
+                insertionSort(a, low, high);
+                return;
+            }
+
+            /*
+             * Check if the whole array or large non-leftmost
+             * parts are nearly sorted and then merge runs.
+             */
+            if ((bits == 0 || size > MIN_TRY_MERGE_SIZE && (bits & 1) > 0)
+                    && tryMergeRuns(sorter, a, low, size)) {
+                return;
+            }
+
+            /*
+             * Switch to heap sort if execution
+             * time is becoming quadratic.
+             */
+            if ((bits += DELTA) > MAX_RECURSION_DEPTH) {
+                heapSort(a, low, high);
+                return;
+            }
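+
+            /*
+             * Note (editorial): DELTA is even, so adding it preserves
+             * the low "leftmost" flag bit of bits while the higher
+             * bits accumulate the depth checked above.
+             */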
+
+            /*
+             * Use an inexpensive approximation of the golden ratio
+             * to select five sample elements and determine pivots.
+             */
+            int step = (size >> 3) * 3 + 3;
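+
+            /*
+             * Note (editorial): (size >> 3) * 3 is size * 3/8, i.e.
+             * 0.375 * size, close to the golden-section ratio
+             * 1 - 1/phi (about 0.382), so e1 and e5 below land near
+             * the golden-section points of the range.
+             */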
+
+            /*
+             * Five elements around (and including) the central element
+             * will be used for pivot selection as described below. The
+             * unequal choice of spacing these elements was empirically
+             * determined to work well on a wide variety of inputs.
+             */
+            int e1 = low + step;
+            int e5 = end - step;
+            int e3 = (e1 + e5) >>> 1;
+            int e2 = (e1 + e3) >>> 1;
+            int e4 = (e3 + e5) >>> 1;
+            double a3 = a[e3];
+
+            /*
+             * Sort these elements in place by the combination
+             * of 4-element sorting network and insertion sort.
+             *
+             *    5 ------o-----------o------------
+             *            |           |
+             *    4 ------|-----o-----o-----o------
+             *            |     |           |
+             *    2 ------o-----|-----o-----o------
+             *                  |     |
+             *    1 ------------o-----o------------
+             */
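+
+            /*
+             * Note (editorial): each vertical pair of 'o' marks above
+             * is a compare-exchange; the five exchanges below leave
+             * a[e1] <= a[e2] <= a[e4] <= a[e5], after which the
+             * central element a3 is inserted by the if-chain that
+             * follows.
+             */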
+            if (a[e5] < a[e2]) { double t = a[e5]; a[e5] = a[e2]; a[e2] = t; }
+            if (a[e4] < a[e1]) { double t = a[e4]; a[e4] = a[e1]; a[e1] = t; }
+            if (a[e5] < a[e4]) { double t = a[e5]; a[e5] = a[e4]; a[e4] = t; }
+            if (a[e2] < a[e1]) { double t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
+            if (a[e4] < a[e2]) { double t = a[e4]; a[e4] = a[e2]; a[e2] = t; }
+
+            if (a3 < a[e2]) {
+                if (a3 < a[e1]) {
+                    a[e3] = a[e2]; a[e2] = a[e1]; a[e1] = a3;
+                } else {
+                    a[e3] = a[e2]; a[e2] = a3;
+                }
+            } else if (a3 > a[e4]) {
+                if (a3 > a[e5]) {
+                    a[e3] = a[e4]; a[e4] = a[e5]; a[e5] = a3;
+                } else {
+                    a[e3] = a[e4]; a[e4] = a3;
+                }
             }
-            if (Double.doubleToRawLongBits(ak) < 0) { // ak is -0.0d
-                a[k] = 0.0d;
-                a[++p] = -0.0d;
+
+            // Pointers
+            int lower = low; // The index of the last element of the left part
+            int upper = end; // The index of the first element of the right part
+
+            /*
+             * Partitioning with 2 pivots in case of different elements.
+             */
+            if (a[e1] < a[e2] && a[e2] < a[e3] && a[e3] < a[e4] && a[e4] < a[e5]) {
+
+                /*
+                 * Use the first and fifth of the five sorted elements as
+                 * the pivots. These values are inexpensive approximations
+                 * of the tertiles. Note that pivot1 < pivot2.
+                 */
+                double pivot1 = a[e1];
+                double pivot2 = a[e5];
+
+                /*
+                 * The first and the last elements to be sorted are moved
+                 * to the locations formerly occupied by the pivots. When
+                 * partitioning is completed, the pivots are swapped back
+                 * into their final positions, and excluded from
+                 * subsequent sorting.
+                 */
+                a[e1] = a[lower];
+                a[e5] = a[upper];
+
+                /*
+                 * Skip elements that are less than pivot1 or greater than pivot2.
+                 */
+                while (a[++lower] < pivot1);
+                while (a[--upper] > pivot2);
+
+                /*
+                 * Backward 3-interval partitioning
+                 *
+                 *   left part                 central part          right part
+                 * +------------------------------------------------------------+
+                 * |  < pivot1  |   ?   |  pivot1 <= && <= pivot2  |  > pivot2  |
+                 * +------------------------------------------------------------+
+                 *             ^       ^                            ^
+                 *             |       |                            |
+                 *           lower     k                          upper
+                 *
+                 * Invariants:
+                 *
+                 *              all in (low, lower] < pivot1
+                 *    pivot1 <= all in (k, upper)  <= pivot2
+                 *              all in [upper, end) > pivot2
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
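+
+                /*
+                 * Illustration (editorial): with pivot1 = 3 and
+                 * pivot2 = 7, an element 9 met at k goes straight to
+                 * the right area via a[--upper], while an element 1
+                 * is exchanged with the first element >= 3 found by
+                 * advancing lower; if that element is itself > 7, it
+                 * is routed on to the right area in the same step.
+                 */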
+                for (int unused = --lower, k = ++upper; --k > lower; ) {
+                    double ak = a[k];
+
+                    if (ak < pivot1) { // Move a[k] to the left side
+                        while (lower < k) {
+                            if (a[++lower] >= pivot1) {
+                                if (a[lower] > pivot2) {
+                                    a[k] = a[--upper];
+                                    a[upper] = a[lower];
+                                } else {
+                                    a[k] = a[lower];
+                                }
+                                a[lower] = ak;
+                                break;
+                            }
+                        }
+                    } else if (ak > pivot2) { // Move a[k] to the right side
+                        a[k] = a[--upper];
+                        a[upper] = ak;
+                    }
+                }
+
+                /*
+                 * Swap the pivots into their final positions.
+                 */
+                a[low] = a[lower]; a[lower] = pivot1;
+                a[end] = a[upper]; a[upper] = pivot2;
+
+                /*
+                 * Sort non-left parts recursively (possibly in parallel),
+                 * excluding known pivots.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, lower + 1, upper);
+                    sorter.forkSorter(bits | 1, upper + 1, high);
+                } else {
+                    sort(sorter, a, bits | 1, lower + 1, upper);
+                    sort(sorter, a, bits | 1, upper + 1, high);
+                }
+
+            } else { // Use single pivot in case of many equal elements
+
+                /*
+                 * Use the third of the five sorted elements as the pivot.
+                 * This value is an inexpensive approximation of the median.
+                 */
+                double pivot = a[e3];
+
+                /*
+                 * The first element to be sorted is moved to the
+                 * location formerly occupied by the pivot. After
+                 * completion of partitioning the pivot is swapped
+                 * back into its final position, and excluded
+                 * from subsequent sorting.
+                 */
+                a[e3] = a[lower];
+
+                /*
+                 * Traditional 3-way (Dutch National Flag) partitioning
+                 *
+                 *   left part                 central part    right part
+                 * +------------------------------------------------------+
+                 * |   < pivot   |     ?     |   == pivot   |   > pivot   |
+                 * +------------------------------------------------------+
+                 *              ^           ^                ^
+                 *              |           |                |
+                 *            lower         k              upper
+                 *
+                 * Invariants:
+                 *
+                 *   all in (low, lower] < pivot
+                 *   all in (k, upper)  == pivot
+                 *   all in [upper, end] > pivot
+                 *
+                 * Pointer k is the last index of ?-part
+                 */
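+
+                /*
+                 * Note (editorial): overwriting a[k] with the pivot
+                 * value below is exact for doubles because phase 1
+                 * already replaced every -0.0d with 0.0d, so values
+                 * equal to the pivot are bit-identical and no sign
+                 * of zero can be lost here.
+                 */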
+                for (int k = ++upper; --k > lower; ) {
+                    double ak = a[k];
+
+                    if (ak != pivot) {
+                        a[k] = pivot;
+
+                        if (ak < pivot) { // Move a[k] to the left side
+                            while (a[++lower] < pivot);
+
+                            if (a[lower] > pivot) {
+                                a[--upper] = a[lower];
+                            }
+                            a[lower] = ak;
+                        } else { // ak > pivot - Move a[k] to the right side
+                            a[--upper] = ak;
+                        }
+                    }
+                }
+
+                /*
+                 * Swap the pivot into its final position.
+                 */
+                a[low] = a[lower]; a[lower] = pivot;
+
+                /*
+                 * Sort the right part (possibly in parallel), excluding
+                 * known pivot. All elements from the central part are
+                 * equal and therefore already sorted.
+                 */
+                if (size > MIN_PARALLEL_SORT_SIZE && sorter != null) {
+                    sorter.forkSorter(bits | 1, upper, high);
+                } else {
+                    sort(sorter, a, bits | 1, upper, high);
+                }
+            }
+            high = lower; // Iterate along the left part
+        }
+    }
+
+    /**
+     * Sorts the specified range of the array using mixed insertion sort.
+     *
+     * Mixed insertion sort is a combination of simple insertion sort,
+     * pin insertion sort and pair insertion sort.
+     *
+     * In the context of Dual-Pivot Quicksort, the pivot element
+     * from the left part plays the role of a sentinel, because it
+     * is less than any element from the given part. Therefore, the
+     * expensive check of the left range can be skipped on each
+     * iteration unless it is the leftmost call.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param end the index of the last element for simple insertion sort
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void mixedInsertionSort(double[] a, int low, int end, int high) {
+        if (end == high) {
+
+            /*
+             * Invoke simple insertion sort on tiny array.
+             */
+            for (int i; ++low < end; ) {
+                double ai = a[i = low];
+
+                while (ai < a[--i]) {
+                    a[i + 1] = a[i];
+                }
+                a[i + 1] = ai;
+            }
+        } else {
+
+            /*
+             * Start with pin insertion sort on the small part.
+             *
+             * Pin insertion sort is an extended simple insertion sort.
+             * The main idea of this sort is to put elements larger
+             * than an element called the pin to the end of the array
+             * (the proper area for such elements). It avoids expensive
+             * movements of these elements through the whole array.
+             */
+            double pin = a[end];
+
+            for (int i, p = high; ++low < end; ) {
+                double ai = a[i = low];
+
+                if (ai < a[i - 1]) { // Small element
+
+                    /*
+                     * Insert small element into sorted part.
+                     */
+                    a[i] = a[--i];
+
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = ai;
+
+                } else if (p > i && ai > pin) { // Large element
+
+                    /*
+                     * Find element smaller than pin.
+                     */
+                    while (a[--p] > pin);
+
+                    /*
+                     * Swap it with large element.
+                     */
+                    if (p > i) {
+                        ai = a[p];
+                        a[p] = a[i];
+                    }
+
+                    /*
+                     * Insert small element into sorted part.
+                     */
+                    while (ai < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = ai;
+                }
+            }
+
+            /*
+             * Continue with pair insertion sort on the remaining part.
+             */
+            for (int i; low < high; ++low) {
+                double a1 = a[i = low], a2 = a[++low];
+
+                /*
+                 * Insert two elements per iteration: at first, insert the
+                 * larger element and then insert the smaller element, but
+                 * from the position where the larger element was inserted.
+                 */
+                if (a1 > a2) {
+
+                    while (a1 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a1;
+
+                    while (a2 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a2;
+
+                } else if (a1 < a[i - 1]) {
+
+                    while (a2 < a[--i]) {
+                        a[i + 2] = a[i];
+                    }
+                    a[++i + 1] = a2;
+
+                    while (a1 < a[--i]) {
+                        a[i + 1] = a[i];
+                    }
+                    a[i + 1] = a1;
+                }
             }
         }
     }
 
     /**
-     * Sorts the specified range of the array.
+     * Sorts the specified range of the array using insertion sort.
      *
      * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param work a workspace array (slice)
-     * @param workBase origin of usable space in work array
-     * @param workLen usable size of work array
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void doSort(double[] a, int left, int right,
-                               double[] work, int workBase, int workLen) {
-        // Use Quicksort on small arrays
-        if (right - left < QUICKSORT_THRESHOLD) {
-            sort(a, left, right, true);
-            return;
-        }
-
-        /*
-         * Index run[i] is the start of i-th run
-         * (ascending or descending sequence).
-         */
-        int[] run = new int[MAX_RUN_COUNT + 1];
-        int count = 0; run[0] = left;
-
-        // Check if the array is nearly sorted
-        for (int k = left; k < right; run[count] = k) {
-            // Equal items in the beginning of the sequence
-            while (k < right && a[k] == a[k + 1])
-                k++;
-            if (k == right) break;  // Sequence finishes with equal items
-            if (a[k] < a[k + 1]) { // ascending
-                while (++k <= right && a[k - 1] <= a[k]);
-            } else if (a[k] > a[k + 1]) { // descending
-                while (++k <= right && a[k - 1] >= a[k]);
-                // Transform into an ascending sequence
-                for (int lo = run[count] - 1, hi = k; ++lo < --hi; ) {
-                    double t = a[lo]; a[lo] = a[hi]; a[hi] = t;
+    private static void insertionSort(double[] a, int low, int high) {
+        for (int i, k = low; ++k < high; ) {
+            double ai = a[i = k];
+
+            if (ai < a[i - 1]) {
+                while (--i >= low && ai < a[i]) {
+                    a[i + 1] = a[i];
                 }
-            }
-
-            // Merge a transformed descending sequence followed by an
-            // ascending sequence
-            if (run[count] > left && a[run[count]] >= a[run[count] - 1]) {
-                count--;
-            }
-
-            /*
-             * The array is not highly structured,
-             * use Quicksort instead of merge sort.
-             */
-            if (++count == MAX_RUN_COUNT) {
-                sort(a, left, right, true);
-                return;
+                a[i + 1] = ai;
             }
         }
-
-        // These invariants should hold true:
-        //    run[0] = 0
-        //    run[<last>] = right + 1; (terminator)
-
-        if (count == 0) {
-            // A single equal run
-            return;
-        } else if (count == 1 && run[count] > right) {
-            // Either a single ascending or a transformed descending run.
-            // Always check that a final run is a proper terminator, otherwise
-            // we have an unterminated trailing run, to handle downstream.
-            return;
-        }
-        right++;
-        if (run[count] < right) {
-            // Corner case: the final run is not a terminator. This may happen
-            // if a final run is an equals run, or there is a single-element run
-            // at the end. Fix up by adding a proper terminator at the end.
-            // Note that we terminate with (right + 1), incremented earlier.
-            run[++count] = right;
+    }
+
+    /**
+     * Sorts the specified range of the array using heap sort.
+     *
+     * @param a the array to be sorted
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
+     */
+    private static void heapSort(double[] a, int low, int high) {
+        for (int k = (low + high) >>> 1; k > low; ) {
+            pushDown(a, --k, a[k], low, high);
         }
-
-        // Determine alternation base for merge
-        byte odd = 0;
-        for (int n = 1; (n <<= 1) < count; odd ^= 1);
-
-        // Use or create temporary array b for merging
-        double[] b;                 // temp array; alternates with a
-        int ao, bo;              // array offsets from 'left'
-        int blen = right - left; // space needed for b
-        if (work == null || workLen < blen || workBase + blen > work.length) {
-            work = new double[blen];
-            workBase = 0;
-        }
-        if (odd == 0) {
-            System.arraycopy(a, left, work, workBase, blen);
-            b = a;
-            bo = 0;
-            a = work;
-            ao = workBase - left;
-        } else {
-            b = work;
-            ao = 0;
-            bo = workBase - left;
-        }
-
-        // Merging
-        for (int last; count > 1; count = last) {
-            for (int k = (last = 0) + 2; k <= count; k += 2) {
-                int hi = run[k], mi = run[k - 1];
-                for (int i = run[k - 2], p = i, q = mi; i < hi; ++i) {
-                    if (q >= hi || p < mi && a[p + ao] <= a[q + ao]) {
-                        b[i + bo] = a[p++ + ao];
-                    } else {
-                        b[i + bo] = a[q++ + ao];
-                    }
-                }
-                run[++last] = hi;
-            }
-            if ((count & 1) != 0) {
-                for (int i = right, lo = run[count - 1]; --i >= lo;
-                    b[i + bo] = a[i + ao]
-                );
-                run[++last] = right;
-            }
-            double[] t = a; a = b; b = t;
-            int o = ao; ao = bo; bo = o;
+        while (--high > low) {
+            double max = a[low];
+            pushDown(a, low, a[high], low, high);
+            a[high] = max;
         }
     }
 
     /**
-     * Sorts the specified range of the array by Dual-Pivot Quicksort.
+     * Pushes the specified element down during heap sort.
      *
-     * @param a the array to be sorted
-     * @param left the index of the first element, inclusive, to be sorted
-     * @param right the index of the last element, inclusive, to be sorted
-     * @param leftmost indicates if this part is the leftmost in the range
+     * @param a the given array
+     * @param p the start index
+     * @param value the given element
+     * @param low the index of the first element, inclusive, to be sorted
+     * @param high the index of the last element, exclusive, to be sorted
      */
-    private static void sort(double[] a, int left, int right, boolean leftmost) {
-        int length = right - left + 1;
-
-        // Use insertion sort on tiny arrays
-        if (length < INSERTION_SORT_THRESHOLD) {
-            if (leftmost) {
-                /*
-                 * Traditional (without sentinel) insertion sort,
-                 * optimized for server VM, is used in case of
-                 * the leftmost part.
-                 */
-                for (int i = left, j = i; i < right; j = ++i) {
-                    double ai = a[i + 1];
-                    while (ai < a[j]) {
-                        a[j + 1] = a[j];
-                        if (j-- == left) {
-                            break;
-                        }
-                    }
-                    a[j + 1] = ai;
-                }
-            } else {
-                /*
-                 * Skip the longest ascending sequence.
-                 */
-                do {
-                    if (left >= right) {
-                        return;
-                    }
-                } while (a[++left] >= a[left - 1]);
-
-                /*
-                 * Every element from adjoining part plays the role
-                 * of sentinel, therefore this allows us to avoid the
-                 * left range check on each iteration. Moreover, we use
-                 * the more optimized algorithm, so called pair insertion
-                 * sort, which is faster (in the context of Quicksort)
-                 * than traditional implementation of insertion sort.
-                 */
-                for (int k = left; ++left <= right; k = ++left) {
-                    double a1 = a[k], a2 = a[left];
-
-                    if (a1 < a2) {
-                        a2 = a1; a1 = a[left];
-                    }
-                    while (a1 < a[--k]) {
-                        a[k + 2] = a[k];
-                    }
-                    a[++k + 1] = a1;
-
-                    while (a2 < a[--k]) {
-                        a[k + 1] = a[k];
-                    }
-                    a[k + 1] = a2;
-                }
-                double last = a[right];
-
-                while (last < a[--right]) {
-                    a[right + 1] = a[right];
-                }
-                a[right + 1] = last;
+    private static void pushDown(double[] a, int p, double value, int low, int high) {
+        for (int k ;; a[p] = a[p = k]) {
+            k = (p << 1) - low + 2; // Index of the right child
+
+            if (k > high) {
+                break;
+            }
+            if (k == high || a[k] < a[k - 1]) {
+                --k;
+            }
+            if (a[k] <= value) {
+                break;
             }
-            return;
         }
-
-        // Inexpensive approximation of length / 7
-        int seventh = (length >> 3) + (length >> 6) + 1;
+        a[p] = value;
+    }
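
The two methods above are a textbook in-place heapsort over the half-open
range [low, high): pushDown sifts a value toward the leaves of a max-heap
rooted at low, where the children of node p sit at indexes 2*p - low + 1
and 2*p - low + 2. A minimal, self-contained sketch, simplified to
low == 0 (class and identifier names below are illustrative, not the JDK's):

    class HeapSortSketch {

        static void heapSort(double[] a) {
            int high = a.length;
            // Build a max-heap bottom-up over [0, high).
            for (int k = high >>> 1; k > 0; ) {
                pushDown(a, --k, a[k], high);
            }
            // Repeatedly move the root (the maximum) behind the shrinking heap.
            while (--high > 0) {
                double max = a[0];
                pushDown(a, 0, a[high], high);
                a[high] = max;
            }
        }

        // Sift 'value' down from node p; children sit at 2*p + 1 and 2*p + 2.
        static void pushDown(double[] a, int p, double value, int high) {
            for (int k;; a[p] = a[p = k]) {
                k = (p << 1) + 2;                       // right child
                if (k > high) {
                    break;                              // no children in heap
                }
                if (k == high || a[k] < a[k - 1]) {
                    --k;                                // take the larger child
                }
                if (a[k] <= value) {
                    break;                              // heap order restored
                }
            }
            a[p] = value;
        }

        public static void main(String[] args) {
            double[] a = {3.1, 1.4, 1.5, 9.2, 6.5, 3.5};
            heapSort(a);
            System.out.println(java.util.Arrays.toString(a));  // ascending
        }
    }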
+
+    /**
+     * Tries to sort the specified range of the array.
+     *
+     * @param sorter parallel context
+     * @param a the array to be sorted
+     * @param low the index of the first element to be sorted
+     * @param size the array size
+     * @return true if the range is finally sorted, false otherwise
+     */
+    private static boolean tryMergeRuns(Sorter sorter, double[] a, int low, int size) {
 
         /*
-         * Sort five evenly spaced elements around (and including) the
-         * center element in the range. These elements will be used for
-         * pivot selection as described below. The choice for spacing
-         * these elements was empirically determined to work well on
-         * a wide variety of inputs.
+         * The run array is constructed only if the initial runs are
+         * long enough to continue; run[i] then holds the start index
+         * of the i-th sequence of elements in non-descending order.
          */
-        int e3 = (left + right) >>> 1; // The midpoint
-        int e2 = e3 - seventh;
-        int e1 = e2 - seventh;
-        int e4 = e3 + seventh;
-        int e5 = e4 + seventh;
-
-        // Sort these elements using insertion sort
-        if (a[e2] < a[e1]) { double t = a[e2]; a[e2] = a[e1]; a[e1] = t; }
-
-        if (a[e3] < a[e2]) { double t = a[e3]; a[e3] = a[e2]; a[e2] = t;
-            if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-        }
-        if (a[e4] < a[e3]) { double t = a[e4]; a[e4] = a[e3]; a[e3] = t;
-            if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-            }
-        }
-        if (a[e5] < a[e4]) { double t = a[e5]; a[e5] = a[e4]; a[e4] = t;
-            if (t < a[e3]) { a[e4] = a[e3]; a[e3] = t;
-                if (t < a[e2]) { a[e3] = a[e2]; a[e2] = t;
-                    if (t < a[e1]) { a[e2] = a[e1]; a[e1] = t; }
-                }
-            }
-        }
-
-        // Pointers
-        int less  = left;  // The index of the first element of center part
-        int great = right; // The index before the first element of right part
-
-        if (a[e1] != a[e2] && a[e2] != a[e3] && a[e3] != a[e4] && a[e4] != a[e5]) {
-            /*
-             * Use the second and fourth of the five sorted elements as pivots.
-             * These values are inexpensive approximations of the first and
-             * second terciles of the array. Note that pivot1 <= pivot2.
-             */
-            double pivot1 = a[e2];
-            double pivot2 = a[e4];
-
-            /*
-             * The first and the last elements to be sorted are moved to the
-             * locations formerly occupied by the pivots. When partitioning
-             * is complete, the pivots are swapped back into their final
-             * positions, and excluded from subsequent sorting.
-             */
-            a[e2] = a[left];
-            a[e4] = a[right];
-
-            /*
-             * Skip elements, which are less or greater than pivot values.
-             */
-            while (a[++less] < pivot1);
-            while (a[--great] > pivot2);
+        int[] run = null;
+        int high = low + size;
+        int count = 1, last = low;
+
+        /*
+         * Identify all possible runs.
+         */
+        for (int k = low + 1; k < high; ) {
 
             /*
-             * Partitioning:
-             *
-             *   left part           center part                   right part
-             * +--------------------------------------------------------------+
-             * |  < pivot1  |  pivot1 <= && <= pivot2  |    ?    |  > pivot2  |
-             * +--------------------------------------------------------------+
-             *               ^                          ^       ^
-             *               |                          |       |
-             *              less                        k     great
-             *
-             * Invariants:
-             *
-             *              all in (left, less)   < pivot1
-             *    pivot1 <= all in [less, k)     <= pivot2
-             *              all in (great, right) > pivot2
-             *
-             * Pointer k is the first index of ?-part.
+             * Find the end index of the current run.
              */
-            outer:
-            for (int k = less - 1; ++k <= great; ) {
-                double ak = a[k];
-                if (ak < pivot1) { // Move a[k] to left part
-                    a[k] = a[less];
-                    /*
-                     * Here and below we use "a[i] = b; i++;" instead
-                     * of "a[i++] = b;" due to performance issue.
-                     */
-                    a[less] = ak;
-                    ++less;
-                } else if (ak > pivot2) { // Move a[k] to right part
-                    while (a[great] > pivot2) {
-                        if (great-- == k) {
-                            break outer;
-                        }
-                    }
-                    if (a[great] < pivot1) { // a[great] <= pivot2
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // pivot1 <= a[great] <= pivot2
-                        a[k] = a[great];
-                    }
-                    /*
-                     * Here and below we use "a[i] = b; i--;" instead
-                     * of "a[i--] = b;" due to performance issue.
-                     */
-                    a[great] = ak;
-                    --great;
-                }
-            }
-
-            // Swap pivots into their final positions
-            a[left]  = a[less  - 1]; a[less  - 1] = pivot1;
-            a[right] = a[great + 1]; a[great + 1] = pivot2;
-
-            // Sort left and right parts recursively, excluding known pivots
-            sort(a, left, less - 2, leftmost);
-            sort(a, great + 2, right, false);
-
-            /*
-             * If center part is too large (comprises > 4/7 of the array),
-             * swap internal pivot values to ends.
-             */
-            if (less < e1 && e5 < great) {
-                /*
-                 * Skip elements, which are equal to pivot values.
-                 */
-                while (a[less] == pivot1) {
-                    ++less;
-                }
-
-                while (a[great] == pivot2) {
-                    --great;
+            if (a[k - 1] < a[k]) {
+
+                // Identify ascending sequence
+                while (++k < high && a[k - 1] <= a[k]);
+
+            } else if (a[k - 1] > a[k]) {
+
+                // Identify descending sequence
+                while (++k < high && a[k - 1] >= a[k]);
+
+                // Reverse into ascending order
+                for (int i = last - 1, j = k; ++i < --j && a[i] > a[j]; ) {
+                    double ai = a[i]; a[i] = a[j]; a[j] = ai;
                 }
-
-                /*
-                 * Partitioning:
-                 *
-                 *   left part         center part                  right part
-                 * +----------------------------------------------------------+
-                 * | == pivot1 |  pivot1 < && < pivot2  |    ?    | == pivot2 |
-                 * +----------------------------------------------------------+
-                 *              ^                        ^       ^
-                 *              |                        |       |
-                 *             less                      k     great
-                 *
-                 * Invariants:
-                 *
-                 *              all in (*,  less) == pivot1
-                 *     pivot1 < all in [less,  k)  < pivot2
-                 *              all in (great, *) == pivot2
-                 *
-                 * Pointer k is the first index of ?-part.
-                 */
-                outer:
-                for (int k = less - 1; ++k <= great; ) {
-                    double ak = a[k];
-                    if (ak == pivot1) { // Move a[k] to left part
-                        a[k] = a[less];
-                        a[less] = ak;
-                        ++less;
-                    } else if (ak == pivot2) { // Move a[k] to right part
-                        while (a[great] == pivot2) {
-                            if (great-- == k) {
-                                break outer;
-                            }
-                        }
-                        if (a[great] == pivot1) { // a[great] < pivot2
-                            a[k] = a[less];
-                            /*
-                             * Even though a[great] equals to pivot1, the
-                             * assignment a[less] = pivot1 may be incorrect,
-                             * if a[great] and pivot1 are floating-point zeros
-                             * of different signs. Therefore in float and
-                             * double sorting methods we have to use more
-                             * accurate assignment a[less] = a[great].
-                             */
-                            a[less] = a[great];
-                            ++less;
-                        } else { // pivot1 < a[great] < pivot2
-                            a[k] = a[great];
-                        }
-                        a[great] = ak;
-                        --great;
-                    }
-                }
-            }
-
-            // Sort center part recursively
-            sort(a, less, great, false);
-
-        } else { // Partitioning with one pivot
-            /*
-             * Use the third of the five sorted elements as pivot.
-             * This value is inexpensive approximation of the median.
-             */
-            double pivot = a[e3];
-
-            /*
-             * Partitioning degenerates to the traditional 3-way
-             * (or "Dutch National Flag") schema:
-             *
-             *   left part    center part              right part
-             * +-------------------------------------------------+
-             * |  < pivot  |   == pivot   |     ?    |  > pivot  |
-             * +-------------------------------------------------+
-             *              ^              ^        ^
-             *              |              |        |
-             *             less            k      great
-             *
-             * Invariants:
-             *
-             *   all in (left, less)   < pivot
-             *   all in [less, k)     == pivot
-             *   all in (great, right) > pivot
-             *
-             * Pointer k is the first index of ?-part.
-             */
-            for (int k = less; k <= great; ++k) {
-                if (a[k] == pivot) {
+            } else { // Identify constant sequence
+                for (double ak = a[k]; ++k < high && ak == a[k]; );
+
+                if (k < high) {
                     continue;
                 }
-                double ak = a[k];
-                if (ak < pivot) { // Move a[k] to left part
-                    a[k] = a[less];
-                    a[less] = ak;
-                    ++less;
-                } else { // a[k] > pivot - Move a[k] to right part
-                    while (a[great] > pivot) {
-                        --great;
-                    }
-                    if (a[great] < pivot) { // a[great] <= pivot
-                        a[k] = a[less];
-                        a[less] = a[great];
-                        ++less;
-                    } else { // a[great] == pivot
-                        /*
-                         * Even though a[great] equals to pivot, the
-                         * assignment a[k] = pivot may be incorrect,
-                         * if a[great] and pivot are floating-point
-                         * zeros of different signs. Therefore in float
-                         * and double sorting methods we have to use
-                         * more accurate assignment a[k] = a[great].
-                         */
-                        a[k] = a[great];
-                    }
-                    a[great] = ak;
-                    --great;
-                }
             }
 
             /*
-             * Sort left and right parts recursively.
-             * All elements from center part are equal
-             * and, therefore, already sorted.
+             * Check special cases.
              */
-            sort(a, left, less - 1, leftmost);
-            sort(a, great + 1, right, false);
+            if (run == null) {
+                if (k == high) {
+
+                    /*
+                     * The array is a single monotonic sequence
+                     * and is therefore already sorted.
+                     */
+                    return true;
+                }
+
+                if (k - low < MIN_FIRST_RUN_SIZE) {
+
+                    /*
+                     * The first run is too small
+                     * to proceed with scanning.
+                     */
+                    return false;
+                }
+
+                run = new int[((size >> 10) | 0x7F) & 0x3FF];
+                run[0] = low;
+
+            } else if (a[last - 1] > a[last]) {
+
+                if (count > (k - low) >> MIN_FIRST_RUNS_FACTOR) {
+
+                    /*
+                     * The first runs are not long
+                     * enough to continue scanning.
+                     */
+                    return false;
+                }
+
+                if (++count == MAX_RUN_CAPACITY) {
+
+                    /*
+                     * Array is not highly structured.
+                     */
+                    return false;
+                }
+
+                if (count == run.length) {
+
+                    /*
+                     * Increase capacity of index array.
+                     */
+                    run = Arrays.copyOf(run, count << 1);
+                }
+            }
+            run[count] = (last = k);
+        }
+
+        /*
+         * Merge runs of highly structured array.
+         */
+        if (count > 1) {
+            double[] b; int offset = low;
+
+            if (sorter == null || (b = (double[]) sorter.b) == null) {
+                b = new double[size];
+            } else {
+                offset = sorter.offset;
+            }
+            mergeRuns(a, b, offset, 1, sorter != null, run, 0, count);
+        }
+        return true;
+    }
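
tryMergeRuns commits to a merge only when the scan finds few, long runs;
otherwise it returns false and the caller falls back to quicksort. A
hedged sketch of the boundary detection alone, which omits the reversal
of descending runs and the MIN_FIRST_RUN_SIZE / MAX_RUN_CAPACITY cutoffs
applied above (identifier names are illustrative):

    import java.util.Arrays;

    class RunScanSketch {

        // Record the start index of each maximal non-descending run.
        static int[] scanRuns(double[] a) {
            int[] run = new int[a.length + 1];
            int count = 0;                      // run[0] == 0 by default
            for (int k = 1; k <= a.length; k++) {
                if (k == a.length || a[k - 1] > a[k]) {
                    run[++count] = k;           // boundary after a descent
                }
            }
            return Arrays.copyOf(run, count + 1);
        }

        public static void main(String[] args) {
            double[] a = {1, 2, 3, 2, 4, 6, 5};
            System.out.println(Arrays.toString(scanRuns(a)));  // [0, 3, 6, 7]
        }
    }

For the sample array this yields runs [0, 3), [3, 6) and [6, 7), which is
exactly the shape of run array that feeds into mergeRuns.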
+
+    /**
+     * Merges the specified runs.
+     *
+     * @param a the source array
+     * @param b the temporary buffer used in merging
+     * @param offset the start index in the source, inclusive
+     * @param aim specifies merging: to the source (> 0),
+     *        to the buffer (< 0), or to either (== 0)
+     * @param parallel indicates whether merging is performed in parallel
+     * @param run the start indexes of the runs, inclusive
+     * @param lo the start index of the first run, inclusive
+     * @param hi the start index of the last run, inclusive
+     * @return the destination where runs are merged
+     */
+    private static double[] mergeRuns(double[] a, double[] b, int offset,
+            int aim, boolean parallel, int[] run, int lo, int hi) {
+
+        if (hi - lo == 1) {
+            if (aim >= 0) {
+                return a;
+            }
+            for (int i = run[hi], j = i - offset, low = run[lo]; i > low;
+                b[--j] = a[--i]
+            );
+            return b;
+        }
+
+        /*
+         * Split into approximately equal parts.
+         */
+        int mi = lo, rmi = (run[lo] + run[hi]) >>> 1;
+        while (run[++mi + 1] <= rmi);
+
+        /*
+         * Merge the left and right parts.
+         */
+        double[] a1, a2;
+
+        if (parallel && hi - lo > MIN_RUN_COUNT) {
+            RunMerger merger = new RunMerger(a, b, offset, 0, run, mi, hi).forkMe();
+            a1 = mergeRuns(a, b, offset, -aim, true, run, lo, mi);
+            a2 = (double[]) merger.getDestination();
+        } else {
+            a1 = mergeRuns(a, b, offset, -aim, false, run, lo, mi);
+            a2 = mergeRuns(a, b, offset,    0, false, run, mi, hi);
+        }
+
+        double[] dst = a1 == a ? b : a;
+
+        int k   = a1 == a ? run[lo] - offset : run[lo];
+        int lo1 = a1 == b ? run[lo] - offset : run[lo];
+        int hi1 = a1 == b ? run[mi] - offset : run[mi];
+        int lo2 = a2 == b ? run[mi] - offset : run[mi];
+        int hi2 = a2 == b ? run[hi] - offset : run[hi];
+
+        if (parallel) {
+            new Merger(null, dst, k, a1, lo1, hi1, a2, lo2, hi2).invoke();
+        } else {
+            mergeParts(null, dst, k, a1, lo1, hi1, a2, lo2, hi2);
+        }
+        return dst;
+    }
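
The split index mi above is chosen so that the element midpoint
rmi = (run[lo] + run[hi]) >>> 1 falls between run[mi] and run[mi + 1]:
the two recursive calls then get roughly equal numbers of elements rather
than equal numbers of runs. A small worked sketch with assumed boundary
values:

    class SplitDemo {
        public static void main(String[] args) {
            int[] run = {0, 10, 30, 100};   // illustrative run boundaries
            int lo = 0, hi = 3;
            int mi = lo, rmi = (run[lo] + run[hi]) >>> 1;   // rmi == 50
            while (run[++mi + 1] <= rmi);
            // mi == 2: the left half merges [0, 30), the right half [30, 100)
            System.out.println(mi);
        }
    }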
+
+    /**
+     * Merges the sorted parts.
+     *
+     * @param merger parallel context
+     * @param dst the destination where parts are merged
+     * @param k the start index of the destination, inclusive
+     * @param a1 the first part
+     * @param lo1 the start index of the first part, inclusive
+     * @param hi1 the end index of the first part, exclusive
+     * @param a2 the second part
+     * @param lo2 the start index of the second part, inclusive
+     * @param hi2 the end index of the second part, exclusive
+     */
+    private static void mergeParts(Merger merger, double[] dst, int k,
+            double[] a1, int lo1, int hi1, double[] a2, int lo2, int hi2) {
+
+        if (merger != null && a1 == a2) {
+
+            while (true) {
+
+                /*
+                 * The first part must be larger.
+                 */
+                if (hi1 - lo1 < hi2 - lo2) {
+                    int lo = lo1; lo1 = lo2; lo2 = lo;
+                    int hi = hi1; hi1 = hi2; hi2 = hi;
+                }
+
+                /*
+                 * Small parts will be merged sequentially.
+                 */
+                if (hi1 - lo1 < MIN_PARALLEL_MERGE_PARTS_SIZE) {
+                    break;
+                }
+
+                /*
+                 * Find the median of the larger part.
+                 */
+                int mi1 = (lo1 + hi1) >>> 1;
+                double key = a1[mi1];
+                int mi2 = hi2;
+
+                /*
+                 * Partition the smaller part.
+                 */
+                for (int loo = lo2; loo < mi2; ) {
+                    int t = (loo + mi2) >>> 1;
+
+                    if (key > a2[t]) {
+                        loo = t + 1;
+                    } else {
+                        mi2 = t;
+                    }
+                }
+
+                int d = mi2 - lo2 + mi1 - lo1;
+
+                /*
+                 * Merge the right sub-parts in parallel.
+                 */
+                merger.forkMerger(dst, k + d, a1, mi1, hi1, a2, mi2, hi2);
+
+                /*
+                 * Process the left sub-parts.
+                 */
+                hi1 = mi1;
+                hi2 = mi2;
+            }
+        }
+
+        /*
+         * Merge small parts sequentially.
+         */
+        while (lo1 < hi1 && lo2 < hi2) {
+            dst[k++] = a1[lo1] < a2[lo2] ? a1[lo1++] : a2[lo2++];
+        }
+        if (dst != a1 || k < lo1) {
+            while (lo1 < hi1) {
+                dst[k++] = a1[lo1++];
+            }
+        }
+        if (dst != a2 || k < lo2) {
+            while (lo2 < hi2) {
+                dst[k++] = a2[lo2++];
+            }
+        }
+    }
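
The parallel branch above halves the work with a classic merge-path
split: take the median element of the larger part and binary-search its
insertion point in the smaller part, after which the left sub-parts and
the right sub-parts can be merged independently. The search is a plain
lower bound; a standalone sketch (illustrative, not the JDK entry point):

    class LowerBoundSketch {

        // First index t in [lo2, hi2) with a2[t] >= key; everything before
        // it merges with the left half, everything from it with the right.
        static int partitionPoint(double[] a2, int lo2, int hi2, double key) {
            while (lo2 < hi2) {
                int t = (lo2 + hi2) >>> 1;
                if (key > a2[t]) {
                    lo2 = t + 1;
                } else {
                    hi2 = t;
                }
            }
            return lo2;
        }

        public static void main(String[] args) {
            double[] a2 = {1, 3, 5, 7, 9};
            System.out.println(partitionPoint(a2, 0, a2.length, 6.0));  // 3
        }
    }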
+
+// [class]
+
+    /**
+     * This class implements parallel sorting.
+     */
+    private static final class Sorter extends CountedCompleter<Void> {
+        private static final long serialVersionUID = 20180818L;
+        private final Object a, b;
+        private final int low, size, offset, depth;
+
+        private Sorter(CountedCompleter<?> parent,
+                Object a, Object b, int low, int size, int offset, int depth) {
+            super(parent);
+            this.a = a;
+            this.b = b;
+            this.low = low;
+            this.size = size;
+            this.offset = offset;
+            this.depth = depth;
+        }
+
+        @Override
+        public final void compute() {
+            if (depth < 0) {
+                setPendingCount(2);
+                int half = size >> 1;
+                new Sorter(this, b, a, low, half, offset, depth + 1).fork();
+                new Sorter(this, b, a, low + half, size - half, offset, depth + 1).compute();
+            } else {
+                if (a instanceof int[]) {
+                    sort(this, (int[]) a, depth, low, low + size);
+                } else if (a instanceof long[]) {
+                    sort(this, (long[]) a, depth, low, low + size);
+                } else if (a instanceof float[]) {
+                    sort(this, (float[]) a, depth, low, low + size);
+                } else if (a instanceof double[]) {
+                    sort(this, (double[]) a, depth, low, low + size);
+                } else {
+                    throw new IllegalArgumentException(
+                        "Unknown type of array: " + a.getClass().getName());
+                }
+            }
+            tryComplete();
+        }
+
+        @Override
+        public final void onCompletion(CountedCompleter<?> caller) {
+            if (depth < 0) {
+                int mi = low + (size >> 1);
+                boolean src = (depth & 1) == 0;
+
+                new Merger(null,
+                    a,
+                    src ? low : low - offset,
+                    b,
+                    src ? low - offset : low,
+                    src ? mi - offset : mi,
+                    b,
+                    src ? mi - offset : mi,
+                    src ? low + size - offset : low + size
+                ).invoke();
+            }
+        }
+
+        private void forkSorter(int depth, int low, int high) {
+            addToPendingCount(1);
+            Object a = this.a; // Use local variable for performance
+            new Sorter(this, a, b, low, high - low, offset, depth).fork();
+        }
+    }
+
+    /**
+     * This class implements parallel merging.
+     */
+    private static final class Merger extends CountedCompleter<Void> {
+        private static final long serialVersionUID = 20180818L;
+        private final Object dst, a1, a2;
+        private final int k, lo1, hi1, lo2, hi2;
+
+        private Merger(CountedCompleter<?> parent, Object dst, int k,
+                Object a1, int lo1, int hi1, Object a2, int lo2, int hi2) {
+            super(parent);
+            this.dst = dst;
+            this.k = k;
+            this.a1 = a1;
+            this.lo1 = lo1;
+            this.hi1 = hi1;
+            this.a2 = a2;
+            this.lo2 = lo2;
+            this.hi2 = hi2;
+        }
+
+        @Override
+        public final void compute() {
+            if (dst instanceof int[]) {
+                mergeParts(this, (int[]) dst, k,
+                    (int[]) a1, lo1, hi1, (int[]) a2, lo2, hi2);
+            } else if (dst instanceof long[]) {
+                mergeParts(this, (long[]) dst, k,
+                    (long[]) a1, lo1, hi1, (long[]) a2, lo2, hi2);
+            } else if (dst instanceof float[]) {
+                mergeParts(this, (float[]) dst, k,
+                    (float[]) a1, lo1, hi1, (float[]) a2, lo2, hi2);
+            } else if (dst instanceof double[]) {
+                mergeParts(this, (double[]) dst, k,
+                    (double[]) a1, lo1, hi1, (double[]) a2, lo2, hi2);
+            } else {
+                throw new IllegalArgumentException(
+                    "Unknown type of array: " + dst.getClass().getName());
+            }
+            propagateCompletion();
+        }
+
+        private void forkMerger(Object dst, int k,
+                Object a1, int lo1, int hi1, Object a2, int lo2, int hi2) {
+            addToPendingCount(1);
+            new Merger(this, dst, k, a1, lo1, hi1, a2, lo2, hi2).fork();
+        }
+    }
+
+    /**
+     * This class implements parallel merging of runs.
+     */
+    private static final class RunMerger extends RecursiveTask<Object> {
+        private static final long serialVersionUID = 20180818L;
+        private final Object a, b;
+        private final int[] run;
+        private final int offset, aim, lo, hi;
+
+        private RunMerger(Object a, Object b, int offset,
+                int aim, int[] run, int lo, int hi) {
+            this.a = a;
+            this.b = b;
+            this.offset = offset;
+            this.aim = aim;
+            this.run = run;
+            this.lo = lo;
+            this.hi = hi;
+        }
+
+        @Override
+        protected final Object compute() {
+            if (a instanceof int[]) {
+                return mergeRuns((int[]) a, (int[]) b, offset, aim, true, run, lo, hi);
+            }
+            if (a instanceof long[]) {
+                return mergeRuns((long[]) a, (long[]) b, offset, aim, true, run, lo, hi);
+            }
+            if (a instanceof float[]) {
+                return mergeRuns((float[]) a, (float[]) b, offset, aim, true, run, lo, hi);
+            }
+            if (a instanceof double[]) {
+                return mergeRuns((double[]) a, (double[]) b, offset, aim, true, run, lo, hi);
+            }
+            throw new IllegalArgumentException(
+                "Unknown type of array: " + a.getClass().getName());
+        }
+
+        private RunMerger forkMe() {
+            fork();
+            return this;
+        }
+
+        private Object getDestination() {
+            join();
+            return getRawResult();
         }
     }
 }
--- a/src/java.base/share/classes/javax/crypto/Cipher.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/javax/crypto/Cipher.java	Thu Nov 14 13:50:03 2019 +0000
@@ -147,10 +147,6 @@
  * <li>{@code AES/ECB/NoPadding} (128)</li>
  * <li>{@code AES/ECB/PKCS5Padding} (128)</li>
  * <li>{@code AES/GCM/NoPadding} (128)</li>
- * <li>{@code DES/CBC/NoPadding} (56)</li>
- * <li>{@code DES/CBC/PKCS5Padding} (56)</li>
- * <li>{@code DES/ECB/NoPadding} (56)</li>
- * <li>{@code DES/ECB/PKCS5Padding} (56)</li>
  * <li>{@code DESede/CBC/NoPadding} (168)</li>
  * <li>{@code DESede/CBC/PKCS5Padding} (168)</li>
  * <li>{@code DESede/ECB/NoPadding} (168)</li>
--- a/src/java.base/share/classes/javax/crypto/CryptoPermissions.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/javax/crypto/CryptoPermissions.java	Thu Nov 14 13:50:03 2019 +0000
@@ -40,6 +40,8 @@
 import java.io.ObjectOutputStream;
 import java.io.IOException;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * This class contains CryptoPermission objects, organized into
  * PermissionCollections according to algorithm names.
@@ -99,7 +101,7 @@
     void load(InputStream in)
         throws IOException, CryptoPolicyParser.ParsingException {
         CryptoPolicyParser parser = new CryptoPolicyParser();
-        parser.read(new BufferedReader(new InputStreamReader(in, "UTF-8")));
+        parser.read(new BufferedReader(new InputStreamReader(in, UTF_8)));
 
         CryptoPermission[] parsingResult = parser.getPermissions();
         for (int i = 0; i < parsingResult.length; i++) {
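
This hunk follows a pattern repeated throughout the changeset: charset
names given as strings are replaced with java.nio.charset.StandardCharsets
constants. Besides skipping the lookup by name, the Charset overloads
cannot throw the checked UnsupportedEncodingException. A before/after
sketch, assuming a caller that supplies the stream:

    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.io.Reader;
    import java.io.UnsupportedEncodingException;

    import static java.nio.charset.StandardCharsets.UTF_8;

    class CharsetMigration {

        // Old style: the String overload declares a checked exception.
        static Reader oldStyle(InputStream in) throws UnsupportedEncodingException {
            return new InputStreamReader(in, "UTF-8");
        }

        // New style: the Charset overload cannot fail for a built-in charset.
        static Reader newStyle(InputStream in) {
            return new InputStreamReader(in, UTF_8);
        }
    }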
--- a/src/java.base/share/classes/javax/crypto/KeyGenerator.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/javax/crypto/KeyGenerator.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,7 +97,6 @@
  * parentheses:
  * <ul>
  * <li>{@code AES} (128)</li>
- * <li>{@code DES} (56)</li>
  * <li>{@code DESede} (168)</li>
  * <li>{@code HmacSHA1}</li>
  * <li>{@code HmacSHA256}</li>
--- a/src/java.base/share/classes/javax/crypto/Mac.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/javax/crypto/Mac.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,7 +56,6 @@
  * <p> Every implementation of the Java platform is required to support
  * the following standard {@code Mac} algorithms:
  * <ul>
- * <li>{@code HmacMD5}</li>
  * <li>{@code HmacSHA1}</li>
  * <li>{@code HmacSHA256}</li>
  * </ul>
--- a/src/java.base/share/classes/javax/crypto/SecretKeyFactory.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/javax/crypto/SecretKeyFactory.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,16 +52,13 @@
  * {@link #generateSecret(java.security.spec.KeySpec) generateSecret} and
  * {@link #getKeySpec(javax.crypto.SecretKey, java.lang.Class) getKeySpec}
  * methods.
- * For example, the DES secret-key factory supplied by the "SunJCE" provider
- * supports {@code DESKeySpec} as a transparent representation of DES
- * keys, and that provider's secret-key factory for Triple DES keys supports
- * {@code DESedeKeySpec} as a transparent representation of Triple DES
- * keys.
+ * For example, the DESede (Triple DES) secret-key factory supplied by the
+ * "SunJCE" provider supports {@code DESedeKeySpec} as a transparent
+ * representation of Triple DES keys.
  *
  * <p> Every implementation of the Java platform is required to support the
  * following standard {@code SecretKeyFactory} algorithms:
  * <ul>
- * <li>{@code DES}</li>
  * <li>{@code DESede}</li>
  * </ul>
  * These algorithms are described in the <a href=
@@ -74,7 +71,6 @@
  * @author Jan Luehe
  *
  * @see SecretKey
- * @see javax.crypto.spec.DESKeySpec
  * @see javax.crypto.spec.DESedeKeySpec
  * @see javax.crypto.spec.PBEKeySpec
  * @since 1.4
--- a/src/java.base/share/classes/jdk/internal/PreviewFeature.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/jdk/internal/PreviewFeature.java	Thu Nov 14 13:50:03 2019 +0000
@@ -54,7 +54,6 @@
     public boolean essentialAPI() default false;
 
     public enum Feature {
-        SWITCH_EXPRESSIONS,
         TEXT_BLOCKS;
     }
 }
--- a/src/java.base/share/classes/sun/net/www/protocol/https/DelegateHttpsURLConnection.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/net/www/protocol/https/DelegateHttpsURLConnection.java	Thu Nov 14 13:50:03 2019 +0000
@@ -72,13 +72,4 @@
     protected javax.net.ssl.HostnameVerifier getHostnameVerifier() {
         return httpsURLConnection.getHostnameVerifier();
     }
-
-    /*
-     * Called by layered delegator's finalize() method to handle closing
-     * the underlying object.
-     */
-    @SuppressWarnings("deprecation")
-    protected void dispose() throws Throwable {
-        super.finalize();
-    }
 }
--- a/src/java.base/share/classes/sun/net/www/protocol/https/HttpsURLConnectionImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/net/www/protocol/https/HttpsURLConnectionImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -461,16 +461,6 @@
         delegate.setDefaultUseCaches(defaultusecaches);
     }
 
-    /*
-     * finalize (dispose) the delegated object.  Otherwise
-     * sun.net.www.protocol.http.HttpURLConnection's finalize()
-     * would have to be made public.
-     */
-    @SuppressWarnings("deprecation")
-    protected void finalize() throws Throwable {
-        delegate.dispose();
-    }
-
     public boolean equals(Object obj) {
         return this == obj || ((obj instanceof HttpsURLConnectionImpl) &&
             delegate.equals(((HttpsURLConnectionImpl)obj).delegate));
--- a/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/nio/ch/DatagramChannelImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -239,6 +239,40 @@
         }
     }
 
+    /**
+     * Returns the protocol family to pass to set/getSocketOption for the
+     * given socket option.
+     */
+    private ProtocolFamily familyFor(SocketOption<?> name) {
+        assert Thread.holdsLock(stateLock);
+
+        // unspecified (most options)
+        if (SocketOptionRegistry.findOption(name, Net.UNSPEC) != null)
+            return Net.UNSPEC;
+
+        // IPv4 socket
+        if (family == StandardProtocolFamily.INET)
+            return StandardProtocolFamily.INET;
+
+        // IPv6 socket that is unbound
+        if (localAddress == null)
+            return StandardProtocolFamily.INET6;
+
+        // IPv6 socket bound to wildcard or IPv6 address
+        InetAddress address = localAddress.getAddress();
+        if (address.isAnyLocalAddress() || (address instanceof Inet6Address))
+            return StandardProtocolFamily.INET6;
+
+        // IPv6 socket bound to IPv4 address
+        if (Net.canUseIPv6OptionsWithIPv4LocalAddress()) {
+            // IPV6_XXX options can be used
+            return StandardProtocolFamily.INET6;
+        } else {
+            // IPV6_XXX options cannot be used
+            return StandardProtocolFamily.INET;
+        }
+    }
+
     @Override
     public <T> DatagramChannel setOption(SocketOption<T> name, T value)
         throws IOException
@@ -252,14 +286,7 @@
         synchronized (stateLock) {
             ensureOpen();
 
-            if (name == StandardSocketOptions.IP_TOS ||
-                name == StandardSocketOptions.IP_MULTICAST_TTL ||
-                name == StandardSocketOptions.IP_MULTICAST_LOOP)
-            {
-                // options are protocol dependent
-                Net.setSocketOption(fd, family, name, value);
-                return this;
-            }
+            ProtocolFamily family = familyFor(name);
 
             if (name == StandardSocketOptions.IP_MULTICAST_IF) {
                 NetworkInterface interf = (NetworkInterface)value;
@@ -285,7 +312,7 @@
             }
 
             // remaining options don't need any special handling
-            Net.setSocketOption(fd, Net.UNSPEC, name, value);
+            Net.setSocketOption(fd, family, name, value);
             return this;
         }
     }
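
With familyFor in place, protocol-dependent options such as IP_TOS are
resolved against the channel's effective protocol family in one spot
instead of being special-cased at each call site. A hedged usage sketch
from the public API side (the printed value is platform-dependent):

    import java.io.IOException;
    import java.net.StandardSocketOptions;
    import java.nio.channels.DatagramChannel;

    public class TosDemo {
        public static void main(String[] args) throws IOException {
            try (DatagramChannel ch = DatagramChannel.open()) {
                // Internally routed through familyFor(...) by this change.
                ch.setOption(StandardSocketOptions.IP_TOS, 0x10);
                System.out.println(ch.getOption(StandardSocketOptions.IP_TOS));
            }
        }
    }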
@@ -302,12 +329,7 @@
         synchronized (stateLock) {
             ensureOpen();
 
-            if (name == StandardSocketOptions.IP_TOS ||
-                name == StandardSocketOptions.IP_MULTICAST_TTL ||
-                name == StandardSocketOptions.IP_MULTICAST_LOOP)
-            {
-                return (T) Net.getSocketOption(fd, family, name);
-            }
+            ProtocolFamily family = familyFor(name);
 
             if (name == StandardSocketOptions.IP_MULTICAST_IF) {
                 if (family == StandardProtocolFamily.INET) {
@@ -333,11 +355,11 @@
             }
 
             if (name == StandardSocketOptions.SO_REUSEADDR && reuseAddressEmulated) {
-                return (T)Boolean.valueOf(isReuseAddress);
+                return (T) Boolean.valueOf(isReuseAddress);
             }
 
             // no special handling
-            return (T) Net.getSocketOption(fd, Net.UNSPEC, name);
+            return (T) Net.getSocketOption(fd, family, name);
         }
     }
 
--- a/src/java.base/share/classes/sun/nio/ch/Net.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/nio/ch/Net.java	Thu Nov 14 13:50:03 2019 +0000
@@ -121,6 +121,14 @@
         return canJoin6WithIPv4Group0();
     }
 
+    /**
+     * Tells whether IPV6_XXX socket options should be used on an IPv6 socket
+     * that is bound to an IPv4 address.
+     */
+    static boolean canUseIPv6OptionsWithIPv4LocalAddress() {
+        return canUseIPv6OptionsWithIPv4LocalAddress0();
+    }
+
     public static InetSocketAddress checkAddress(SocketAddress sa) {
         if (sa == null)
             throw new NullPointerException();
@@ -434,6 +442,8 @@
 
     private static native boolean canJoin6WithIPv4Group0();
 
+    private static native boolean canUseIPv6OptionsWithIPv4LocalAddress0();
+
     static FileDescriptor socket(boolean stream) throws IOException {
         return socket(UNSPEC, stream);
     }
--- a/src/java.base/share/classes/sun/security/pkcs12/PKCS12KeyStore.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/pkcs12/PKCS12KeyStore.java	Thu Nov 14 13:50:03 2019 +0000
@@ -51,6 +51,8 @@
 import java.security.spec.PKCS8EncodedKeySpec;
 import java.util.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import java.security.AlgorithmParameters;
 import java.security.InvalidAlgorithmParameterException;
 import javax.crypto.spec.PBEParameterSpec;
@@ -687,12 +689,14 @@
                 entry.attributes.addAll(attributes);
             }
             // set the keyId to current date
-            entry.keyId = ("Time " + (entry.date).getTime()).getBytes("UTF8");
+            entry.keyId = ("Time " + (entry.date).getTime()).getBytes(UTF_8);
             // set the alias
             entry.alias = alias.toLowerCase(Locale.ENGLISH);
             // add the entry
             entries.put(alias.toLowerCase(Locale.ENGLISH), entry);
 
+        } catch (KeyStoreException kse) {
+            throw kse;
         } catch (Exception nsae) {
             throw new KeyStoreException("Key protection" +
                        " algorithm not found: " + nsae, nsae);
@@ -746,12 +750,8 @@
                 alias + "'");
         }
 
-        try {
-            // set the keyId to current date
-            entry.keyId = ("Time " + (entry.date).getTime()).getBytes("UTF8");
-        } catch (UnsupportedEncodingException ex) {
-            // Won't happen
-        }
+        // set the keyId to current date
+        entry.keyId = ("Time " + (entry.date).getTime()).getBytes(UTF_8);
         // set the alias
         entry.alias = alias.toLowerCase(Locale.ENGLISH);
 
@@ -2499,18 +2499,18 @@
                        // attribute in pkcs12 with one private key entry and
                        // associated cert-chain
                        if (privateKeyCount == 1) {
-                            keyId = "01".getBytes("UTF8");
+                            keyId = "01".getBytes(UTF_8);
                        } else {
                             continue;
                        }
                     } else {
                         // keyId in a SecretKeyEntry is not significant
-                        keyId = "00".getBytes("UTF8");
+                        keyId = "00".getBytes(UTF_8);
                     }
                 }
                 entry.keyId = keyId;
                 // restore date if it exists
-                String keyIdStr = new String(keyId, "UTF8");
+                String keyIdStr = new String(keyId, UTF_8);
                 Date date = null;
                 if (keyIdStr.startsWith("Time ")) {
                     try {
@@ -2547,7 +2547,7 @@
                 if ((keyId == null) && (privateKeyCount == 1)) {
                     // insert localKeyID only for EE cert or self-signed cert
                     if (i == 0) {
-                        keyId = "01".getBytes("UTF8");
+                        keyId = "01".getBytes(UTF_8);
                     }
                 }
                 // Trusted certificate
--- a/src/java.base/share/classes/sun/security/provider/ConfigFile.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/provider/ConfigFile.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,8 @@
 import sun.security.util.PropertyExpander;
 import sun.security.util.ResourcesMgr;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * This class represents a default implementation for
  * {@code javax.security.auth.login.Configuration}.
@@ -325,7 +327,7 @@
                           throws IOException {
 
             try (InputStreamReader isr
-                    = new InputStreamReader(getInputStream(config), "UTF-8")) {
+                    = new InputStreamReader(getInputStream(config), UTF_8)) {
                 readConfig(isr, newConfig);
             } catch (FileNotFoundException fnfe) {
                 if (debugConfig != null) {
--- a/src/java.base/share/classes/sun/security/provider/DomainKeyStore.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/provider/DomainKeyStore.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 import java.security.cert.CertificateException;
 import java.util.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import sun.security.pkcs.EncryptedPrivateKeyInfo;
 import sun.security.util.PolicyUtil;
 
@@ -768,7 +770,7 @@
 
         try (InputStreamReader configurationReader =
             new InputStreamReader(
-                PolicyUtil.getInputStream(configuration.toURL()), "UTF-8")) {
+                PolicyUtil.getInputStream(configuration.toURL()), UTF_8)) {
             parser.read(configurationReader);
             domains = parser.getDomainEntries();
 
--- a/src/java.base/share/classes/sun/security/provider/JavaKeyStore.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/provider/JavaKeyStore.java	Thu Nov 14 13:50:03 2019 +0000
@@ -32,6 +32,8 @@
 import java.security.cert.CertificateException;
 import java.util.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import sun.security.pkcs.EncryptedPrivateKeyInfo;
 import sun.security.pkcs12.PKCS12KeyStore;
 import sun.security.util.Debug;
@@ -805,14 +807,14 @@
      * hash with a bit of whitener.
      */
     private MessageDigest getPreKeyedHash(char[] password)
-        throws NoSuchAlgorithmException, UnsupportedEncodingException
+        throws NoSuchAlgorithmException
     {
 
         MessageDigest md = MessageDigest.getInstance("SHA");
         byte[] passwdBytes = convertToBytes(password);
         md.update(passwdBytes);
         Arrays.fill(passwdBytes, (byte) 0x00);
-        md.update("Mighty Aphrodite".getBytes("UTF8"));
+        md.update("Mighty Aphrodite".getBytes(UTF_8));
         return md;
     }
 
--- a/src/java.base/share/classes/sun/security/provider/KeyProtector.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/provider/KeyProtector.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 package sun.security.provider;
 
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.security.Key;
 import java.security.KeyStoreException;
 import java.security.MessageDigest;
--- a/src/java.base/share/classes/sun/security/provider/PolicyFile.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/provider/PolicyFile.java	Thu Nov 14 13:50:03 2019 +0000
@@ -42,12 +42,14 @@
 import java.net.NetPermission;
 import java.util.concurrent.ConcurrentHashMap;
 import jdk.internal.access.JavaSecurityAccess;
-import static jdk.internal.access.JavaSecurityAccess.ProtectionDomainCache;
 import jdk.internal.access.SharedSecrets;
 import jdk.internal.util.StaticProperty;
 import sun.security.util.*;
 import sun.net.www.ParseUtil;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static jdk.internal.access.JavaSecurityAccess.ProtectionDomainCache;
+
 /**
  * This class represents a default Policy implementation for the
  * "JavaPolicy" type.
@@ -559,8 +561,7 @@
         return false;
     }
 
-    private InputStreamReader getInputStreamReader(InputStream is)
-                              throws IOException {
+    private InputStreamReader getInputStreamReader(InputStream is) {
         /*
          * Read in policy using UTF-8 by default.
          *
@@ -569,7 +570,7 @@
          */
         return (notUtf8)
             ? new InputStreamReader(is)
-            : new InputStreamReader(is, "UTF-8");
+            : new InputStreamReader(is, UTF_8);
     }
 
     private void initStaticPolicy(final PolicyInfo newInfo) {
--- a/src/java.base/share/classes/sun/security/ssl/SSLExtension.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/ssl/SSLExtension.java	Thu Nov 14 13:50:03 2019 +0000
@@ -696,8 +696,18 @@
             }
 
             // To switch off the max_fragment_length extension.
+            //
+            // Note that "jsse.enableMFLNExtension" is the CSR approved
+            // property name.  However, "jsse.enableMFLExtension" was used
+            // in the original implementation.  Temporarily, if either of
+            // the two properties is set to true, the extension is switched
+            // on.  We may remove the "jsse.enableMFLExtension" property in
+            // the future.  Please don't continue to use the misspelled
+            // property.
             enableExtension =
-                Utilities.getBooleanProperty("jsse.enableMFLExtension", false);
+                Utilities.getBooleanProperty(
+                        "jsse.enableMFLNExtension", false) ||
+                Utilities.getBooleanProperty(
+                        "jsse.enableMFLExtension", false);
             if (!enableExtension) {
                 extensions.remove(CH_MAX_FRAGMENT_LENGTH);
             }
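
A short usage note: with this change either property name switches the
extension on, but new code should prefer the CSR-approved spelling. A
minimal sketch, assuming both properties default to false:

    public class EnableMfln {
        public static void main(String[] args) {
            // Set before the first TLS handshake; illustrative only.
            System.setProperty("jsse.enableMFLNExtension", "true");   // preferred
            // System.setProperty("jsse.enableMFLExtension", "true"); // legacy
        }
    }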
--- a/src/java.base/share/classes/sun/security/ssl/SSLLogger.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/ssl/SSLLogger.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,8 @@
 import sun.security.util.HexDumpEncoder;
 import sun.security.x509.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Implementation of SSL logger.
  *
@@ -229,7 +231,7 @@
                 try {
                     String formatted =
                         SSLSimpleFormatter.format(this, level, message, thrwbl);
-                    System.err.write(formatted.getBytes("UTF-8"));
+                    System.err.write(formatted.getBytes(UTF_8));
                 } catch (Exception exp) {
                     // ignore it, just for debugging.
                 }
@@ -243,7 +245,7 @@
                 try {
                     String formatted =
                         SSLSimpleFormatter.format(this, level, message, params);
-                    System.err.write(formatted.getBytes("UTF-8"));
+                    System.err.write(formatted.getBytes(UTF_8));
                 } catch (Exception exp) {
                     // ignore it, just for debugging.
                 }
--- a/src/java.base/share/classes/sun/security/util/DerInputStream.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/util/DerInputStream.java	Thu Nov 14 13:50:03 2019 +0000
@@ -27,9 +27,12 @@
 
 import java.io.InputStream;
 import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.charset.Charset;
 import java.util.Date;
 import java.util.Vector;
-import java.math.BigInteger;
+
+import static java.nio.charset.StandardCharsets.*;
 
 /**
  * A DER input stream, used for parsing ASN.1 DER-encoded data such as
@@ -457,7 +460,7 @@
      * Read a string that was encoded as a UTF8String DER value.
      */
     public String getUTF8String() throws IOException {
-        return readString(DerValue.tag_UTF8String, "UTF-8", "UTF8");
+        return readString(DerValue.tag_UTF8String, "UTF-8", UTF_8);
     }
 
     /**
@@ -465,7 +468,7 @@
      */
     public String getPrintableString() throws IOException {
         return readString(DerValue.tag_PrintableString, "Printable",
-                          "ASCII");
+                          US_ASCII);
     }
 
     /**
@@ -475,22 +478,21 @@
         /*
          * Works for common characters between T61 and ASCII.
          */
-        return readString(DerValue.tag_T61String, "T61", "ISO-8859-1");
+        return readString(DerValue.tag_T61String, "T61", ISO_8859_1);
     }
 
     /**
-     * Read a string that was encoded as a IA5tring DER value.
+     * Read a string that was encoded as an IA5String DER value.
      */
     public String getIA5String() throws IOException {
-        return readString(DerValue.tag_IA5String, "IA5", "ASCII");
+        return readString(DerValue.tag_IA5String, "IA5", US_ASCII);
     }
 
     /**
      * Read a string that was encoded as a BMPString DER value.
      */
     public String getBMPString() throws IOException {
-        return readString(DerValue.tag_BMPString, "BMP",
-                          "UnicodeBigUnmarked");
+        return readString(DerValue.tag_BMPString, "BMP", UTF_16BE);
     }
 
     /**
@@ -498,7 +500,7 @@
      */
     public String getGeneralString() throws IOException {
         return readString(DerValue.tag_GeneralString, "General",
-                          "ASCII");
+                          US_ASCII);
     }
 
     /**
@@ -510,7 +512,7 @@
      * correspond to the stringTag above.
      */
     private String readString(byte stringTag, String stringName,
-                              String enc) throws IOException {
+                              Charset charset) throws IOException {
 
         if (buffer.read() != stringTag)
             throw new IOException("DER input not a " +
@@ -522,7 +524,7 @@
             throw new IOException("Short read of DER " +
                                   stringName + " string");
 
-        return new String(retval, enc);
+        return new String(retval, charset);
     }
 
     /**
--- a/src/java.base/share/classes/sun/security/util/DerOutputStream.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/util/DerOutputStream.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,14 +28,16 @@
 import java.io.ByteArrayOutputStream;
 import java.io.OutputStream;
 import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.charset.Charset;
 import java.text.SimpleDateFormat;
 import java.util.Date;
 import java.util.TimeZone;
 import java.util.Comparator;
 import java.util.Arrays;
-import java.math.BigInteger;
 import java.util.Locale;
 
+import static java.nio.charset.StandardCharsets.*;
 
 /**
  * Output stream marshaling DER-encoded data.  This is eventually provided
@@ -398,14 +400,14 @@
      * Marshals a string as a DER encoded UTF8String.
      */
     public void putUTF8String(String s) throws IOException {
-        writeString(s, DerValue.tag_UTF8String, "UTF8");
+        writeString(s, DerValue.tag_UTF8String, UTF_8);
     }
 
     /**
      * Marshals a string as a DER encoded PrintableString.
      */
     public void putPrintableString(String s) throws IOException {
-        writeString(s, DerValue.tag_PrintableString, "ASCII");
+        writeString(s, DerValue.tag_PrintableString, US_ASCII);
     }
 
     /**
@@ -416,28 +418,28 @@
          * Works for characters that are defined in both ASCII and
          * T61.
          */
-        writeString(s, DerValue.tag_T61String, "ISO-8859-1");
+        writeString(s, DerValue.tag_T61String, ISO_8859_1);
     }
 
     /**
      * Marshals a string as a DER encoded IA5String.
      */
     public void putIA5String(String s) throws IOException {
-        writeString(s, DerValue.tag_IA5String, "ASCII");
+        writeString(s, DerValue.tag_IA5String, US_ASCII);
     }
 
     /**
      * Marshals a string as a DER encoded BMPString.
      */
     public void putBMPString(String s) throws IOException {
-        writeString(s, DerValue.tag_BMPString, "UnicodeBigUnmarked");
+        writeString(s, DerValue.tag_BMPString, UTF_16BE);
     }
 
     /**
      * Marshals a string as a DER encoded GeneralString.
      */
     public void putGeneralString(String s) throws IOException {
-        writeString(s, DerValue.tag_GeneralString, "ASCII");
+        writeString(s, DerValue.tag_GeneralString, US_ASCII);
     }
 
     /**
@@ -448,10 +450,10 @@
      * @param enc the name of the encoder that should be used corresponding
      * to the above tag.
      */
-    private void writeString(String s, byte stringTag, String enc)
+    private void writeString(String s, byte stringTag, Charset charset)
         throws IOException {
 
-        byte[] data = s.getBytes(enc);
+        byte[] data = s.getBytes(charset);
         write(stringTag);
         putLength(data.length);
         write(data);
@@ -502,7 +504,7 @@
 
         SimpleDateFormat sdf = new SimpleDateFormat(pattern, Locale.US);
         sdf.setTimeZone(tz);
-        byte[] time = (sdf.format(d)).getBytes("ISO-8859-1");
+        byte[] time = (sdf.format(d)).getBytes(ISO_8859_1);
 
         /*
          * Write the formatted date.
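
Each writeString variant above emits the same tag-length-value shape,
differing only in the tag byte and the charset. A minimal sketch of the
encoding for a short PrintableString (DerOutputStream itself is
JDK-internal; the class below is illustrative and assumes DER's
short-form length, i.e. fewer than 128 value bytes):

    import java.io.ByteArrayOutputStream;

    import static java.nio.charset.StandardCharsets.US_ASCII;

    class DerTlvSketch {

        static byte[] printableString(String s) {
            byte[] data = s.getBytes(US_ASCII);
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            out.write(0x13);           // tag_PrintableString
            out.write(data.length);    // short-form length octet
            out.writeBytes(data);      // value bytes
            return out.toByteArray();
        }

        public static void main(String[] args) {
            for (byte b : printableString("Hi")) {
                System.out.printf("%02x ", b);  // prints: 13 02 48 69
            }
        }
    }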
--- a/src/java.base/share/classes/sun/security/util/DerValue.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/util/DerValue.java	Thu Nov 14 13:50:03 2019 +0000
@@ -27,8 +27,11 @@
 
 import java.io.*;
 import java.math.BigInteger;
+import java.nio.charset.Charset;
 import java.util.Date;
 
+import static java.nio.charset.StandardCharsets.*;
+
 /**
  * Represents a single DER-encoded value.  DER encoding rules are a subset
  * of the "Basic" Encoding Rules (BER), but they only support a single way
@@ -204,7 +207,7 @@
     /**
      * Creates a PrintableString or UTF8string DER value from a string
      */
-    public DerValue(String value) throws IOException {
+    public DerValue(String value) {
         boolean isPrintableString = true;
         for (int i = 0; i < value.length(); i++) {
             if (!isPrintableStringChar(value.charAt(i))) {
@@ -221,7 +224,7 @@
      * @param stringTag the tag for the DER value to create
      * @param value the String object to use for the DER value
      */
-    public DerValue(byte stringTag, String value) throws IOException {
+    public DerValue(byte stringTag, String value) {
         data = init(stringTag, value);
     }
 
@@ -337,9 +340,8 @@
         this(in, true);
     }
 
-    private DerInputStream init(byte stringTag, String value)
-        throws IOException {
-        String enc = null;
+    private DerInputStream init(byte stringTag, String value) {
+        final Charset charset;
 
         tag = stringTag;
 
@@ -347,16 +349,16 @@
         case tag_PrintableString:
         case tag_IA5String:
         case tag_GeneralString:
-            enc = "ASCII";
+            charset = US_ASCII;
             break;
         case tag_T61String:
-            enc = "ISO-8859-1";
+            charset = ISO_8859_1;
             break;
         case tag_BMPString:
-            enc = "UnicodeBigUnmarked";
+            charset = UTF_16BE;
             break;
         case tag_UTF8String:
-            enc = "UTF8";
+            charset = UTF_8;
             break;
             // TBD: Need encoder for UniversalString before it can
             // be handled.
@@ -364,7 +366,7 @@
             throw new IllegalArgumentException("Unsupported DER string type");
         }
 
-        byte[] buf = value.getBytes(enc);
+        byte[] buf = value.getBytes(charset);
         length = buf.length;
         buffer = new DerInputBuffer(buf, true);
         DerInputStream result = new DerInputStream(buffer);
@@ -665,7 +667,7 @@
             throw new IOException(
                 "DerValue.getPrintableString, not a string " + tag);
 
-        return new String(getDataBytes(), "ASCII");
+        return new String(getDataBytes(), US_ASCII);
     }
 
     /**
@@ -678,7 +680,7 @@
             throw new IOException(
                 "DerValue.getT61String, not T61 " + tag);
 
-        return new String(getDataBytes(), "ISO-8859-1");
+        return new String(getDataBytes(), ISO_8859_1);
     }
 
     /**
@@ -691,7 +693,7 @@
             throw new IOException(
                 "DerValue.getIA5String, not IA5 " + tag);
 
-        return new String(getDataBytes(), "ASCII");
+        return new String(getDataBytes(), US_ASCII);
     }
 
     /**
@@ -707,7 +709,7 @@
 
         // BMPString is the same as Unicode in big endian, unmarked
         // format.
-        return new String(getDataBytes(), "UnicodeBigUnmarked");
+        return new String(getDataBytes(), UTF_16BE);
     }
 
     /**
@@ -721,7 +723,7 @@
             throw new IOException(
                 "DerValue.getUTF8String, not UTF-8 " + tag);
 
-        return new String(getDataBytes(), "UTF8");
+        return new String(getDataBytes(), UTF_8);
     }
 
     /**
@@ -735,7 +737,7 @@
             throw new IOException(
                 "DerValue.getGeneralString, not GeneralString " + tag);
 
-        return new String(getDataBytes(), "ASCII");
+        return new String(getDataBytes(), US_ASCII);
     }
 
     /**
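
The init method above fixes the charset for each supported DER string tag, which is why getBytes can no longer throw and the two constructors lose their throws IOException clauses. A compact restatement of that mapping as a standalone sketch; the tag values are the standard ASN.1 universal tag numbers, not copied from DerValue, and the class name is illustrative:

    import java.nio.charset.Charset;
    import static java.nio.charset.StandardCharsets.*;

    public class DerCharsets {
        // Standard ASN.1 universal tag numbers for the string types above.
        static final byte UTF8_STRING = 0x0C, PRINTABLE_STRING = 0x13,
                T61_STRING = 0x14, IA5_STRING = 0x16,
                GENERAL_STRING = 0x1B, BMP_STRING = 0x1E;

        // The same tag-to-charset mapping, written as a switch expression.
        static Charset charsetFor(byte stringTag) {
            return switch (stringTag) {
                case PRINTABLE_STRING, IA5_STRING, GENERAL_STRING -> US_ASCII;
                case T61_STRING -> ISO_8859_1;
                case BMP_STRING -> UTF_16BE;
                case UTF8_STRING -> UTF_8;
                default -> throw new IllegalArgumentException(
                        "Unsupported DER string type");
            };
        }

        public static void main(String[] args) {
            System.out.println(charsetFor(UTF8_STRING)); // UTF-8
        }
    }
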
--- a/src/java.base/share/classes/sun/security/util/DomainName.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/util/DomainName.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,8 @@
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import sun.security.ssl.SSLLogger;
 
 /**
@@ -151,7 +153,7 @@
         private final boolean hasExceptions;
 
         private Rules(InputStream is) throws IOException {
-            InputStreamReader isr = new InputStreamReader(is, "UTF-8");
+            InputStreamReader isr = new InputStreamReader(is, UTF_8);
             BufferedReader reader = new BufferedReader(isr);
             boolean hasExceptions = false;
 
--- a/src/java.base/share/classes/sun/security/util/HexDumpEncoder.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/util/HexDumpEncoder.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1995, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,8 @@
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import static java.nio.charset.StandardCharsets.ISO_8859_1;
+
 /**
  * This class encodes a buffer into the classic: "Hexadecimal Dump" format of
  * the past. It is useful for analyzing the contents of binary buffers.
@@ -183,17 +185,15 @@
      */
     public String encode(byte aBuffer[]) {
         ByteArrayOutputStream outStream = new ByteArrayOutputStream();
-        ByteArrayInputStream    inStream = new ByteArrayInputStream(aBuffer);
-        String retVal = null;
+        ByteArrayInputStream inStream = new ByteArrayInputStream(aBuffer);
         try {
             encode(inStream, outStream);
             // explicit ascii->unicode conversion
-            retVal = outStream.toString("ISO-8859-1");
-        } catch (Exception IOException) {
+            return outStream.toString(ISO_8859_1);
+        } catch (IOException ignore) {
             // This should never happen.
             throw new Error("CharacterEncoder.encode internal error");
         }
-        return (retVal);
     }
 
     /**
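
The encode rewrite above leans on ByteArrayOutputStream.toString(Charset), added in JDK 10, which, unlike toString(String), declares no checked exception, so the result can be returned directly from the try block. A small sketch under those assumptions:

    import java.io.ByteArrayOutputStream;
    import static java.nio.charset.StandardCharsets.ISO_8859_1;

    public class BaosToStringDemo {
        public static void main(String[] args) {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            out.writeBytes(new byte[] { 0x4A, 0x44, 0x4B });
            // Charset overload (since JDK 10): no checked exception,
            // so a caller can return this straight from a try block.
            System.out.println(out.toString(ISO_8859_1)); // JDK
        }
    }
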
--- a/src/java.base/share/classes/sun/security/x509/AVA.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/x509/AVA.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1996, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1996, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 import java.text.Normalizer;
 import java.util.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import sun.security.action.GetBooleanAction;
 import sun.security.util.*;
 import sun.security.pkcs.PKCS9Attribute;
@@ -525,14 +527,13 @@
         return null;
     }
 
-    private static String getEmbeddedHexString(List<Byte> hexList)
-                                                throws IOException {
+    private static String getEmbeddedHexString(List<Byte> hexList) {
         int n = hexList.size();
         byte[] hexBytes = new byte[n];
         for (int i = 0; i < n; i++) {
-                hexBytes[i] = hexList.get(i).byteValue();
+            hexBytes[i] = hexList.get(i).byteValue();
         }
-        return new String(hexBytes, "UTF8");
+        return new String(hexBytes, UTF_8);
     }
 
     private static boolean isTerminator(int ch, int format) {
@@ -752,7 +753,7 @@
              */
             String valStr = null;
             try {
-                valStr = new String(value.getDataBytes(), "UTF8");
+                valStr = new String(value.getDataBytes(), UTF_8);
             } catch (IOException ie) {
                 throw new IllegalArgumentException("DER Value conversion");
             }
@@ -804,13 +805,7 @@
 
                     // embed non-printable/non-escaped char
                     // as escaped hex pairs for debugging
-                    byte[] valueBytes = null;
-                    try {
-                        valueBytes = Character.toString(c).getBytes("UTF8");
-                    } catch (IOException ie) {
-                        throw new IllegalArgumentException
-                                        ("DER Value conversion");
-                    }
+                    byte[] valueBytes = Character.toString(c).getBytes(UTF_8);
                     for (int j = 0; j < valueBytes.length; j++) {
                         sbuffer.append('\\');
                         char hexChar = Character.forDigit
@@ -905,7 +900,7 @@
              */
             String valStr = null;
             try {
-                valStr = new String(value.getDataBytes(), "UTF8");
+                valStr = new String(value.getDataBytes(), UTF_8);
             } catch (IOException ie) {
                 throw new IllegalArgumentException("DER Value conversion");
             }
@@ -966,13 +961,7 @@
 
                     previousWhite = false;
 
-                    byte[] valueBytes = null;
-                    try {
-                        valueBytes = Character.toString(c).getBytes("UTF8");
-                    } catch (IOException ie) {
-                        throw new IllegalArgumentException
-                                        ("DER Value conversion");
-                    }
+                    byte[] valueBytes = Character.toString(c).getBytes(UTF_8);
                     for (int j = 0; j < valueBytes.length; j++) {
                         sbuffer.append('\\');
                         sbuffer.append(Character.forDigit
@@ -1116,7 +1105,7 @@
 
                         // embed escaped hex pairs
                         byte[] valueBytes =
-                                Character.toString(c).getBytes("UTF8");
+                                Character.toString(c).getBytes(UTF_8);
                         for (int j = 0; j < valueBytes.length; j++) {
                             sbuffer.append('\\');
                             char hexChar = Character.forDigit
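
All four AVA hunks above reduce to the same idiom: encode one character as UTF-8 and emit each byte as a backslash-escaped hex pair. A standalone restatement of that loop (class and method names are illustrative only):

    import static java.nio.charset.StandardCharsets.UTF_8;

    public class HexEscapeDemo {
        // Encode one char as UTF-8 and emit each byte as an escaped
        // hex pair, mirroring the loops in the AVA hunks above.
        static String escape(char c) {
            StringBuilder sb = new StringBuilder();
            for (byte b : Character.toString(c).getBytes(UTF_8)) {
                sb.append('\\');
                sb.append(Character.forDigit((b >>> 4) & 0x0F, 16));
                sb.append(Character.forDigit(b & 0x0F, 16));
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            System.out.println(escape('\u00fc')); // \c3\bc
        }
    }
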
--- a/src/java.base/share/classes/sun/security/x509/X509CertImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/security/x509/X509CertImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -45,6 +45,8 @@
 import sun.security.util.*;
 import sun.security.provider.X509Factory;
 
+import static java.nio.charset.StandardCharsets.US_ASCII;
+
 /**
  * The X509CertImpl class represents an X.509 certificate. These certificates
  * are widely used to support authentication and other functionality in
@@ -250,7 +252,7 @@
         DerValue der = null;
         String line = null;
         BufferedReader certBufferedReader =
-            new BufferedReader(new InputStreamReader(in, "ASCII"));
+            new BufferedReader(new InputStreamReader(in, US_ASCII));
         try {
             line = certBufferedReader.readLine();
         } catch (IOException ioe1) {
--- a/src/java.base/share/classes/sun/util/locale/provider/HostLocaleProviderAdapter.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/classes/sun/util/locale/provider/HostLocaleProviderAdapter.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
+import java.text.DecimalFormat;
 import java.util.spi.LocaleServiceProvider;
 
 /**
@@ -60,4 +61,19 @@
         }
         return null;
     }
+
+    /**
+     * Utility that restricts a decimal format to integer values; called
+     * by the platform-dependent adapter implementations.
+     *
+     * @param df A DecimalFormat object
+     * @return The same DecimalFormat object passed as the argument, modified
+     *          to allow integer formatting/parsing only.
+     */
+    static DecimalFormat makeIntegerFormatter(DecimalFormat df) {
+        df.setMaximumFractionDigits(0);
+        df.setDecimalSeparatorAlwaysShown(false);
+        df.setParseIntegerOnly(true);
+        return df;
+    }
 }
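
The new makeIntegerFormatter helper centralizes the three DecimalFormat restrictions that turn a general number format into an integer-only one. A sketch of the effect, using a hypothetical pattern rather than a host-provided one:

    import java.text.DecimalFormat;
    import java.text.ParseException;

    public class IntegerFormatterDemo {
        public static void main(String[] args) throws ParseException {
            DecimalFormat df = new DecimalFormat("#,##0.###");
            // The same three restrictions the helper above applies.
            df.setMaximumFractionDigits(0);
            df.setDecimalSeparatorAlwaysShown(false);
            df.setParseIntegerOnly(true);
            System.out.println(df.format(1234.56)); // 1,235 (rounded)
            System.out.println(df.parse("12.99"));  // 12 (stops at the separator)
        }
    }
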
--- a/src/java.base/share/man/java.1	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/man/java.1	Thu Nov 14 13:50:03 2019 +0000
@@ -2975,70 +2975,6 @@
 application uses the heap space.
 .RS
 .RE
-.TP
-.B \f[CB]\-XX:+CMSClassUnloadingEnabled\f[R]
-Enables class unloading when using the concurrent mark\-sweep (CMS)
-garbage collector.
-This option is enabled by default.
-To disable class unloading for the CMS garbage collector, specify
-\f[CB]\-XX:\-CMSClassUnloadingEnabled\f[R].
-.RS
-.RE
-.TP
-.B \f[CB]\-XX:CMSExpAvgFactor=\f[R]\f[I]percent\f[R]
-Sets the percentage of time (0 to 100) used to weight the current sample
-when computing exponential averages for the concurrent collection
-statistics.
-By default, the exponential averages factor is set to 25%.
-The following example shows how to set the factor to 15%:
-.RS
-.RS
-.PP
-\f[CB]\-XX:CMSExpAvgFactor=15\f[R]
-.RE
-.RE
-.TP
-.B \f[CB]\-XX:CMSInitiatingOccupancyFraction=\f[R]\f[I]percent\f[R]
-Sets the percentage of the old generation occupancy (0 to 100) at which
-to start a CMS collection cycle.
-The default value is set to \-1.
-Any negative value (including the default) implies that the option
-\f[CB]\-XX:CMSTriggerRatio\f[R] is used to define the value of the
-initiating occupancy fraction.
-.RS
-.PP
-The following example shows how to set the factor to 20%:
-.RS
-.PP
-\f[CB]\-XX:CMSInitiatingOccupancyFraction=20\f[R]
-.RE
-.RE
-.TP
-.B \f[CB]\-XX:CMSIncrementalDutySafetyFactor=\f[R]\f[I]percent\f[R]
-Sets the percentage (0 to 100) used to add conservatism when computing
-the duty cycle.
-The default value is 10.
-.RS
-.RE
-.TP
-.B \f[CB]\-XX:+CMSScavengeBeforeRemark\f[R]
-Enables scavenging attempts before the CMS remark step.
-By default, this option is disabled.
-.RS
-.RE
-.TP
-.B \f[CB]\-XX:CMSTriggerRatio=percent\f[R]
-Sets the percentage (0 to 100) of the value specified by the option
-\f[CB]\-XX:MinHeapFreeRatio\f[R] that\[aq]s allocated before a CMS
-collection cycle commences.
-The default value is set to 80%.
-.RS
-.PP
-The following example shows how to set the occupancy fraction to 75%:
-.RS
-.PP
-\f[CB]\-XX:CMSTriggerRatio=75\f[R]
-.RE
 .RE
 .TP
 .B \f[CB]\-XX:ConcGCThreads=\f[R]\f[I]threads\f[R]
@@ -3070,7 +3006,6 @@
 Enables invoking of concurrent GC by using the \f[CB]System.gc()\f[R]
 request.
 This option is disabled by default and can be enabled only with the
-deprecated \f[CB]\-XX:+UseConcMarkSweepGC\f[R] option and the
 \f[CB]\-XX:+UseG1GC\f[R] option.
 .RS
 .RE
@@ -3460,8 +3395,7 @@
 .B \f[CB]\-XX:MaxTenuringThreshold=\f[R]\f[I]threshold\f[R]
 Sets the maximum tenuring threshold for use in adaptive GC sizing.
 The largest value is 15.
-The default value is 15 for the parallel (throughput) collector, and 6
-for the CMS collector.
+The default value is 15 for the parallel (throughput) collector.
 .RS
 .PP
 The following example shows how to set the maximum tenuring threshold to
@@ -3724,13 +3658,6 @@
 .RS
 .RE
 .TP
-.B \f[CB]\-XX:+UseCMSInitiatingOccupancyOnly\f[R]
-Enables the use of the occupancy value as the only criterion for
-initiating the CMS collector.
-By default, this option is disabled and other criteria may be used.
-.RS
-.RE
-.TP
 .B \f[CB]\-XX:+UseG1GC\f[R]
 Enables the use of the garbage\-first (G1) garbage collector.
 It\[aq]s a server\-style garbage collector, targeted for multiprocessor
@@ -3975,21 +3902,6 @@
 See \f[B]Enable Logging with the JVM Unified Logging Framework\f[R].
 .RE
 .TP
-.B \f[CB]\-XX:+UseConcMarkSweepGC\f[R]
-Enables the use of the CMS garbage collector for the old generation.
-CMS is an alternative to the default garbage collector (G1), which also
-focuses on meeting application latency requirements.
-By default, this option is disabled and the collector is selected
-automatically based on the configuration of the machine and type of the
-JVM.
-The CMS garbage collector is deprecated.
-.RS
-.RE
-.SH OBSOLETE JAVA OPTIONS
-.PP
-These \f[CB]java\f[R] options are still accepted but ignored, and a
-warning is issued when they\[aq]re used.
-.TP
 .B \f[CB]\-XX:+UseMembar\f[R]
 Enabled issuing membars on thread\-state transitions.
 This option was disabled by default on all platforms except ARM servers,
--- a/src/java.base/share/native/libjli/args.c	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/share/native/libjli/args.c	Thu Nov 14 13:50:03 2019 +0000
@@ -337,7 +337,9 @@
     // remaining partial token
     if (ctx.state == IN_TOKEN || ctx.state == IN_QUOTE) {
         if (ctx.parts->size != 0) {
-            JLI_List_add(rv, JLI_List_combine(ctx.parts));
+            token = JLI_List_combine(ctx.parts);
+            checkArg(token);
+            JLI_List_add(rv, token);
         }
     }
     JLI_List_free(ctx.parts);
--- a/src/java.base/unix/native/libnio/ch/Net.c	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/unix/native/libnio/ch/Net.c	Thu Nov 14 13:50:03 2019 +0000
@@ -158,24 +158,34 @@
 JNIEXPORT jboolean JNICALL
 Java_sun_nio_ch_Net_canIPv6SocketJoinIPv4Group0(JNIEnv* env, jclass cl)
 {
-#if defined(__APPLE__) || defined(_AIX)
-    /* for now IPv6 sockets cannot join IPv4 multicast groups */
+#if defined(__linux__) || defined(__APPLE__) || defined(__solaris__)
+    /* IPv6 sockets can join IPv4 multicast groups */
+    return JNI_TRUE;
+#else
+    /* IPv6 sockets cannot join IPv4 multicast groups */
     return JNI_FALSE;
-#else
-    return JNI_TRUE;
 #endif
 }
 
 JNIEXPORT jboolean JNICALL
 Java_sun_nio_ch_Net_canJoin6WithIPv4Group0(JNIEnv* env, jclass cl)
 {
-#ifdef __solaris__
+#if defined(__APPLE__) || defined(__solaris__)
+    /* IPV6_ADD_MEMBERSHIP can be used to join IPv4 multicast groups */
     return JNI_TRUE;
 #else
+    /* IPV6_ADD_MEMBERSHIP cannot be used to join IPv4 multicast groups */
     return JNI_FALSE;
 #endif
 }
 
+JNIEXPORT jboolean JNICALL
+Java_sun_nio_ch_Net_canUseIPv6OptionsWithIPv4LocalAddress0(JNIEnv* env, jclass cl)
+{
+    /* IPV6_XXX socket options can be used on IPv6 sockets bound to an IPv4 address */
+    return JNI_TRUE;
+}
+
 JNIEXPORT jint JNICALL
 Java_sun_nio_ch_Net_socket0(JNIEnv *env, jclass cl, jboolean preferIPv6,
                             jboolean stream, jboolean reuse, jboolean ignored)
--- a/src/java.base/windows/classes/sun/nio/ch/WindowsSelectorImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/windows/classes/sun/nio/ch/WindowsSelectorImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -38,6 +38,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.function.Consumer;
+import jdk.internal.misc.Unsafe;
 
 /**
  * A multi-threaded implementation of Selector for Windows.
@@ -47,12 +48,26 @@
  */
 
 class WindowsSelectorImpl extends SelectorImpl {
+    private static final Unsafe unsafe = Unsafe.getUnsafe();
+    private static int addressSize = unsafe.addressSize();
+
+    private static int dependsArch(int value32, int value64) {
+        return (addressSize == 4) ? value32 : value64;
+    }
+
     // Initial capacity of the poll array
     private final int INIT_CAP = 8;
     // Maximum number of sockets for select().
     // Should be INIT_CAP times a power of 2
     private static final int MAX_SELECTABLE_FDS = 1024;
 
+    // Size of FD_SET struct to allocate a buffer for it in SubSelector,
+    // aligned to 8 bytes on 64-bit:
+    // struct { unsigned int fd_count; SOCKET fd_array[MAX_SELECTABLE_FDS]; }.
+    private static final long SIZEOF_FD_SET = dependsArch(
+            4 + MAX_SELECTABLE_FDS * 4,      // SOCKET = unsigned int
+            4 + MAX_SELECTABLE_FDS * 8 + 4); // SOCKET = unsigned __int64
+
     // The list of SelectableChannels serviced by this Selector. Every mod
     // MAX_SELECTABLE_FDS entry is bogus, to align this array with the poll
     // array,  where the corresponding entry is occupied by the wakeupSocket
@@ -326,6 +341,9 @@
         private final int[] readFds = new int [MAX_SELECTABLE_FDS + 1];
         private final int[] writeFds = new int [MAX_SELECTABLE_FDS + 1];
         private final int[] exceptFds = new int [MAX_SELECTABLE_FDS + 1];
+        // Buffer for readfds, writefds and exceptfds structs that are passed
+        // to native select().
+        private final long fdsBuffer = unsafe.allocateMemory(SIZEOF_FD_SET * 3);
 
         private SubSelector() {
             this.pollArrayIndex = 0; // main thread
@@ -338,7 +356,7 @@
         private int poll() throws IOException{ // poll for the main thread
             return poll0(pollWrapper.pollArrayAddress,
                          Math.min(totalChannels, MAX_SELECTABLE_FDS),
-                         readFds, writeFds, exceptFds, timeout);
+                         readFds, writeFds, exceptFds, timeout, fdsBuffer);
         }
 
         private int poll(int index) throws IOException {
@@ -347,11 +365,11 @@
                      (pollArrayIndex * PollArrayWrapper.SIZE_POLLFD),
                      Math.min(MAX_SELECTABLE_FDS,
                              totalChannels - (index + 1) * MAX_SELECTABLE_FDS),
-                     readFds, writeFds, exceptFds, timeout);
+                     readFds, writeFds, exceptFds, timeout, fdsBuffer);
         }
 
         private native int poll0(long pollAddress, int numfds,
-             int[] readFds, int[] writeFds, int[] exceptFds, long timeout);
+             int[] readFds, int[] writeFds, int[] exceptFds, long timeout, long fdsBuffer);
 
         private int processSelectedKeys(long updateCount, Consumer<SelectionKey> action) {
             int numKeysUpdated = 0;
@@ -415,6 +433,10 @@
             }
             return numKeysUpdated;
         }
+
+        private void freeFDSetBuffer() {
+            unsafe.freeMemory(fdsBuffer);
+        }
     }
 
     // Represents a helper thread used for select.
@@ -441,8 +463,10 @@
             while (true) { // poll loop
                 // wait for the start of poll. If this thread has become
                 // redundant, then exit.
-                if (startLock.waitForStart(this))
+                if (startLock.waitForStart(this)) {
+                    subSelector.freeFDSetBuffer();
                     return;
+                }
                 // call poll()
                 try {
                     subSelector.poll(index);
@@ -533,6 +557,7 @@
         for (SelectThread t: threads)
              t.makeZombie();
         startLock.startThreads();
+        subSelector.freeFDSetBuffer();
     }
 
     @Override
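
SIZEOF_FD_SET above sizes the native FD_SET struct that poll0 now fills in a preallocated buffer instead of on the stack: 4 + 1024*4 = 4100 bytes when SOCKET is 32 bits, and 4 + 1024*8 + 4 = 8200 bytes when SOCKET is 64 bits, the extra 4 covering alignment padding for the 8-byte SOCKET array. The arithmetic, spelled out:

    public class FdSetSizeDemo {
        static final int MAX_SELECTABLE_FDS = 1024;

        public static void main(String[] args) {
            // 32-bit: 4-byte fd_count + 1024 * 4-byte SOCKET handles.
            long size32 = 4 + MAX_SELECTABLE_FDS * 4L;     // 4100
            // 64-bit: 4-byte fd_count + padding + 1024 * 8-byte SOCKETs.
            long size64 = 4 + MAX_SELECTABLE_FDS * 8L + 4; // 8200
            // One allocation holds readfds, writefds and exceptfds in a row.
            System.out.println(size32 * 3 + " / " + size64 * 3); // 12300 / 24600
        }
    }
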
--- a/src/java.base/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/windows/classes/sun/util/locale/provider/HostLocaleProviderAdapterImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -258,8 +258,9 @@
             @Override
             public NumberFormat getIntegerInstance(Locale locale) {
                 AtomicReferenceArray<String> patterns = getNumberPatterns(locale);
-                return new DecimalFormat(patterns.get(NF_INTEGER),
+                DecimalFormat format = new DecimalFormat(patterns.get(NF_INTEGER),
                     DecimalFormatSymbols.getInstance(locale));
+                return HostLocaleProviderAdapter.makeIntegerFormatter(format);
             }
 
             @Override
--- a/src/java.base/windows/native/libjava/HostLocaleProviderAdapter_md.c	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/windows/native/libjava/HostLocaleProviderAdapter_md.c	Thu Nov 14 13:50:03 2019 +0000
@@ -910,7 +910,7 @@
     if (digits > 0) {
         int i;
         for(i = digits;  i > 0; i--) {
-            fractionPattern[i] = L'0';
+            fractionPattern[i] = L'#';
         }
         fractionPattern[0] = L'.';
         fractionPattern[digits+1] = L'\0';
--- a/src/java.base/windows/native/libnio/ch/Net.c	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/windows/native/libnio/ch/Net.c	Thu Nov 14 13:50:03 2019 +0000
@@ -127,12 +127,21 @@
 JNIEXPORT jboolean JNICALL
 Java_sun_nio_ch_Net_canIPv6SocketJoinIPv4Group0(JNIEnv* env, jclass cl)
 {
-    return JNI_FALSE;
+    /* IPv6 sockets can join IPv4 multicast groups */
+    return JNI_TRUE;
 }
 
 JNIEXPORT jboolean JNICALL
 Java_sun_nio_ch_Net_canJoin6WithIPv4Group0(JNIEnv* env, jclass cl)
 {
+    /* IPV6_ADD_MEMBERSHIP cannot be used to join IPv4 multicast groups */
+    return JNI_FALSE;
+}
+
+JNIEXPORT jboolean JNICALL
+Java_sun_nio_ch_Net_canUseIPv6OptionsWithIPv4LocalAddress0(JNIEnv* env, jclass cl)
+{
+    /* IPV6_XXX socket options cannot be used on IPv6 sockets bound to an IPv4 address */
     return JNI_FALSE;
 }
 
@@ -279,7 +288,7 @@
     SOCKETADDRESS sa;
     int sa_len = sizeof(sa);
 
-    if (getsockname(fdval(env, fdo), &sa.sa, &sa_len) < 0) {
+    if (getsockname(fdval(env, fdo), &sa.sa, &sa_len) == SOCKET_ERROR) {
         int error = WSAGetLastError();
         if (error == WSAEINVAL) {
             return 0;
@@ -297,7 +306,7 @@
     int sa_len = sizeof(sa);
     int port;
 
-    if (getsockname(fdval(env, fdo), &sa.sa, &sa_len) < 0) {
+    if (getsockname(fdval(env, fdo), &sa.sa, &sa_len) == SOCKET_ERROR) {
         NET_ThrowNew(env, WSAGetLastError(), "getsockname");
         return NULL;
     }
@@ -310,7 +319,7 @@
     SOCKETADDRESS sa;
     int sa_len = sizeof(sa);
 
-    if (getpeername(fdval(env, fdo), &sa.sa, &sa_len) < 0) {
+    if (getpeername(fdval(env, fdo), &sa.sa, &sa_len) == SOCKET_ERROR) {
         int error = WSAGetLastError();
         if (error == WSAEINVAL) {
             return 0;
@@ -328,7 +337,7 @@
     int sa_len = sizeof(sa);
     int port;
 
-    if (getpeername(fdval(env, fdo), &sa.sa, &sa_len) < 0) {
+    if (getpeername(fdval(env, fdo), &sa.sa, &sa_len) == SOCKET_ERROR) {
         NET_ThrowNew(env, WSAGetLastError(), "getsockname");
         return NULL;
     }
@@ -366,7 +375,7 @@
     } else {
         n = getsockopt(fdval(env, fdo), level, opt, arg, &arglen);
     }
-    if (n < 0) {
+    if (n == SOCKET_ERROR) {
         handleSocketError(env, WSAGetLastError());
         return IOS_THROWN;
     }
@@ -410,7 +419,7 @@
     } else {
         n = setsockopt(fdval(env, fdo), level, opt, parg, arglen);
     }
-    if (n < 0)
+    if (n == SOCKET_ERROR)
         handleSocketError(env, WSAGetLastError());
 }
 
@@ -439,7 +448,7 @@
     }
 
     n = setsockopt(fdval(env,fdo), IPPROTO_IP, opt, optval, optlen);
-    if (n < 0) {
+    if (n == SOCKET_ERROR) {
         if (join && (WSAGetLastError() == WSAENOPROTOOPT))
             return IOS_UNAVAILABLE;
         handleSocketError(env, WSAGetLastError());
@@ -461,7 +470,7 @@
 
     n = setsockopt(fdval(env,fdo), IPPROTO_IP, opt,
                    (void*)&mreq_source, sizeof(mreq_source));
-    if (n < 0) {
+    if (n == SOCKET_ERROR) {
         if (block && (WSAGetLastError() == WSAENOPROTOOPT))
             return IOS_UNAVAILABLE;
         handleSocketError(env, WSAGetLastError());
@@ -516,8 +525,8 @@
         n = setGroupSourceReqOption(env, fdo, opt, group, index, source);
     }
 
-    if (n < 0) {
-        handleSocketError(env, errno);
+    if (n == SOCKET_ERROR) {
+        handleSocketError(env, WSAGetLastError());
     }
     return 0;
 }
@@ -528,8 +537,8 @@
 {
     int opt = (block) ? MCAST_BLOCK_SOURCE : MCAST_UNBLOCK_SOURCE;
     int n = setGroupSourceReqOption(env, fdo, opt, group, index, source);
-    if (n < 0) {
-        handleSocketError(env, errno);
+    if (n == SOCKET_ERROR) {
+        handleSocketError(env, WSAGetLastError());
     }
     return 0;
 }
@@ -545,7 +554,7 @@
 
     n = setsockopt(fdval(env, fdo), IPPROTO_IP, IP_MULTICAST_IF,
                    (void*)&(in.s_addr), arglen);
-    if (n < 0) {
+    if (n == SOCKET_ERROR) {
         handleSocketError(env, WSAGetLastError());
     }
 }
@@ -558,7 +567,7 @@
     int n;
 
     n = getsockopt(fdval(env, fdo), IPPROTO_IP, IP_MULTICAST_IF, (void*)&in, &arglen);
-    if (n < 0) {
+    if (n == SOCKET_ERROR) {
         handleSocketError(env, WSAGetLastError());
         return IOS_THROWN;
     }
@@ -568,27 +577,27 @@
 JNIEXPORT void JNICALL
 Java_sun_nio_ch_Net_setInterface6(JNIEnv* env, jobject this, jobject fdo, jint index)
 {
-    int value = (jint)index;
+    DWORD value = (jint)index;
     int arglen = sizeof(value);
     int n;
 
     n = setsockopt(fdval(env, fdo), IPPROTO_IPV6, IPV6_MULTICAST_IF,
                    (void*)&(index), arglen);
-    if (n < 0) {
-        handleSocketError(env, errno);
+    if (n == SOCKET_ERROR) {
+        handleSocketError(env, WSAGetLastError());
     }
 }
 
 JNIEXPORT jint JNICALL
 Java_sun_nio_ch_Net_getInterface6(JNIEnv* env, jobject this, jobject fdo)
 {
-    int index;
+    DWORD index;
     int arglen = sizeof(index);
     int n;
 
     n = getsockopt(fdval(env, fdo), IPPROTO_IPV6, IPV6_MULTICAST_IF, (void*)&index, &arglen);
-    if (n < 0) {
-        handleSocketError(env, errno);
+    if (n == SOCKET_ERROR) {
+        handleSocketError(env, WSAGetLastError());
         return -1;
     }
     return (jint)index;
--- a/src/java.base/windows/native/libnio/ch/WindowsSelectorImpl.c	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.base/windows/native/libnio/ch/WindowsSelectorImpl.c	Thu Nov 14 13:50:03 2019 +0000
@@ -39,6 +39,7 @@
 #include "jvm.h"
 #include "jni.h"
 #include "jni_util.h"
+#include "nio.h"
 #include "sun_nio_ch_WindowsSelectorImpl.h"
 #include "sun_nio_ch_PollArrayWrapper.h"
 
@@ -56,12 +57,14 @@
 Java_sun_nio_ch_WindowsSelectorImpl_00024SubSelector_poll0(JNIEnv *env, jobject this,
                                    jlong pollAddress, jint numfds,
                                    jintArray returnReadFds, jintArray returnWriteFds,
-                                   jintArray returnExceptFds, jlong timeout)
+                                   jintArray returnExceptFds, jlong timeout, jlong fdsBuffer)
 {
     DWORD result = 0;
     pollfd *fds = (pollfd *) pollAddress;
     int i;
-    FD_SET readfds, writefds, exceptfds;
+    FD_SET *readfds = (FD_SET *) jlong_to_ptr(fdsBuffer);
+    FD_SET *writefds = (FD_SET *) jlong_to_ptr(fdsBuffer + sizeof(FD_SET));
+    FD_SET *exceptfds = (FD_SET *) jlong_to_ptr(fdsBuffer + sizeof(FD_SET) * 2);
     struct timeval timevalue, *tv;
     static struct timeval zerotime = {0, 0};
     int read_count = 0, write_count = 0, except_count = 0;
@@ -93,103 +96,61 @@
     /* Set FD_SET structures required for select */
     for (i = 0; i < numfds; i++) {
         if (fds[i].events & POLLIN) {
-           readfds.fd_array[read_count] = fds[i].fd;
+           readfds->fd_array[read_count] = fds[i].fd;
            read_count++;
         }
         if (fds[i].events & (POLLOUT | POLLCONN))
         {
-           writefds.fd_array[write_count] = fds[i].fd;
+           writefds->fd_array[write_count] = fds[i].fd;
            write_count++;
         }
-        exceptfds.fd_array[except_count] = fds[i].fd;
+        exceptfds->fd_array[except_count] = fds[i].fd;
         except_count++;
     }
 
-    readfds.fd_count = read_count;
-    writefds.fd_count = write_count;
-    exceptfds.fd_count = except_count;
+    readfds->fd_count = read_count;
+    writefds->fd_count = write_count;
+    exceptfds->fd_count = except_count;
 
     /* Call select */
-    if ((result = select(0 , &readfds, &writefds, &exceptfds, tv))
+    if ((result = select(0 , readfds, writefds, exceptfds, tv))
                                                              == SOCKET_ERROR) {
-        /* Bad error - this should not happen frequently */
-        /* Iterate over sockets and call select() on each separately */
-        FD_SET errreadfds, errwritefds, errexceptfds;
-        readfds.fd_count = 0;
-        writefds.fd_count = 0;
-        exceptfds.fd_count = 0;
-        for (i = 0; i < numfds; i++) {
-            /* prepare select structures for the i-th socket */
-            errreadfds.fd_count = 0;
-            errwritefds.fd_count = 0;
-            if (fds[i].events & POLLIN) {
-               errreadfds.fd_array[0] = fds[i].fd;
-               errreadfds.fd_count = 1;
-            }
-            if (fds[i].events & (POLLOUT | POLLCONN))
-            {
-                errwritefds.fd_array[0] = fds[i].fd;
-                errwritefds.fd_count = 1;
-            }
-            errexceptfds.fd_array[0] = fds[i].fd;
-            errexceptfds.fd_count = 1;
-
-            /* call select on the i-th socket */
-            if (select(0, &errreadfds, &errwritefds, &errexceptfds, &zerotime)
-                                                             == SOCKET_ERROR) {
-                /* This socket causes an error. Add it to exceptfds set */
-                exceptfds.fd_array[exceptfds.fd_count] = fds[i].fd;
-                exceptfds.fd_count++;
-            } else {
-                /* This socket does not cause an error. Process result */
-                if (errreadfds.fd_count == 1) {
-                    readfds.fd_array[readfds.fd_count] = fds[i].fd;
-                    readfds.fd_count++;
-                }
-                if (errwritefds.fd_count == 1) {
-                    writefds.fd_array[writefds.fd_count] = fds[i].fd;
-                    writefds.fd_count++;
-                }
-                if (errexceptfds.fd_count == 1) {
-                    exceptfds.fd_array[exceptfds.fd_count] = fds[i].fd;
-                    exceptfds.fd_count++;
-                }
-            }
-        }
+        JNU_ThrowIOExceptionWithLastError(env, "Select failed");
+        return IOS_THROWN;
     }
 
     /* Return selected sockets. */
     /* Each Java array consists of sockets count followed by sockets list */
 
 #ifdef _WIN64
-    resultbuf[0] = readfds.fd_count;
-    for (i = 0; i < (int)readfds.fd_count; i++) {
-        resultbuf[i + 1] = (int)readfds.fd_array[i];
+    resultbuf[0] = readfds->fd_count;
+    for (i = 0; i < (int)readfds->fd_count; i++) {
+        resultbuf[i + 1] = (int)readfds->fd_array[i];
     }
     (*env)->SetIntArrayRegion(env, returnReadFds, 0,
-                              readfds.fd_count + 1, resultbuf);
+                              readfds->fd_count + 1, resultbuf);
 
-    resultbuf[0] = writefds.fd_count;
-    for (i = 0; i < (int)writefds.fd_count; i++) {
-        resultbuf[i + 1] = (int)writefds.fd_array[i];
+    resultbuf[0] = writefds->fd_count;
+    for (i = 0; i < (int)writefds->fd_count; i++) {
+        resultbuf[i + 1] = (int)writefds->fd_array[i];
     }
     (*env)->SetIntArrayRegion(env, returnWriteFds, 0,
-                              writefds.fd_count + 1, resultbuf);
+                              writefds->fd_count + 1, resultbuf);
 
-    resultbuf[0] = exceptfds.fd_count;
-    for (i = 0; i < (int)exceptfds.fd_count; i++) {
-        resultbuf[i + 1] = (int)exceptfds.fd_array[i];
+    resultbuf[0] = exceptfds->fd_count;
+    for (i = 0; i < (int)exceptfds->fd_count; i++) {
+        resultbuf[i + 1] = (int)exceptfds->fd_array[i];
     }
     (*env)->SetIntArrayRegion(env, returnExceptFds, 0,
-                              exceptfds.fd_count + 1, resultbuf);
+                              exceptfds->fd_count + 1, resultbuf);
 #else
     (*env)->SetIntArrayRegion(env, returnReadFds, 0,
-                              readfds.fd_count + 1, (jint *)&readfds);
+                              readfds->fd_count + 1, (jint *)readfds);
 
     (*env)->SetIntArrayRegion(env, returnWriteFds, 0,
-                              writefds.fd_count + 1, (jint *)&writefds);
+                              writefds->fd_count + 1, (jint *)writefds);
     (*env)->SetIntArrayRegion(env, returnExceptFds, 0,
-                              exceptfds.fd_count + 1, (jint *)&exceptfds);
+                              exceptfds->fd_count + 1, (jint *)exceptfds);
 #endif
     return 0;
 }
--- a/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.compiler/share/classes/javax/lang/model/SourceVersion.java	Thu Nov 14 13:50:03 2019 +0000
@@ -58,9 +58,9 @@
      *   9: modules, small cleanups to 1.7 and 1.8 changes
      *  10: local-variable type inference (var)
      *  11: local-variable syntax for lambda parameters
-     *  12: no changes (switch expressions were in preview)
+     *  12: no changes (switch expressions in preview)
      *  13: no changes (switch expressions and text blocks in preview)
-     *  14: TBD
+     *  14: switch expressions
      */
 
     /**
@@ -199,6 +199,8 @@
      * The version recognized by the Java Platform, Standard Edition
      * 14.
      *
+     * Additions in this release include switch expressions.
+     *
      * @since 14
      */
      RELEASE_14;
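
RELEASE_14 above records switch expressions as a standard language feature. A minimal example of the standardized form (arrow labels, multiple case labels, yield in a block body):

    public class SwitchExpressionDemo {
        public static void main(String[] args) {
            int month = 2;
            // Standardized in Java 14: a switch used as an expression,
            // with no fall-through and a value yielded from each arm.
            int days = switch (month) {
                case 1, 3, 5, 7, 8, 10, 12 -> 31;
                case 4, 6, 9, 11 -> 30;
                case 2 -> {
                    int feb = 28; // simplified: leap years ignored
                    yield feb;
                }
                default -> throw new IllegalArgumentException("month " + month);
            };
            System.out.println(days); // 28
        }
    }
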
--- a/src/java.net.http/share/classes/jdk/internal/net/http/AuthenticationFilter.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.net.http/share/classes/jdk/internal/net/http/AuthenticationFilter.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,6 @@
 import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
 import java.net.URL;
-import java.nio.charset.Charset;
 import java.util.Base64;
 import java.util.LinkedList;
 import java.util.List;
@@ -380,10 +379,18 @@
             return null;
         }
 
+        private static boolean equalsIgnoreCase(String s1, String s2) {
+            return s1 == s2 || (s1 != null && s1.equalsIgnoreCase(s2));
+        }
+
         synchronized void remove(String authscheme, URI domain, boolean proxy) {
-            for (CacheEntry entry : entries) {
-                if (entry.equalsKey(domain, proxy)) {
-                    entries.remove(entry);
+            var iterator = entries.iterator();
+            while (iterator.hasNext()) {
+                var entry = iterator.next();
+                if (equalsIgnoreCase(entry.scheme, authscheme)) {
+                    if (entry.equalsKey(domain, proxy)) {
+                        iterator.remove();
+                    }
                 }
             }
         }
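
The rewritten remove above replaces a for-each loop that called entries.remove(...) mid-iteration, a ConcurrentModificationException hazard, with explicit removal through the iterator, and now also matches the authentication scheme. The removal idiom in isolation, with hypothetical scheme strings:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    public class IteratorRemoveDemo {
        public static void main(String[] args) {
            List<String> entries = new ArrayList<>(List.of("basic", "digest", "basic"));
            // Removing through the iterator is the safe way to drop elements
            // mid-traversal; List.remove inside a for-each loop throws
            // ConcurrentModificationException on the next iteration.
            Iterator<String> it = entries.iterator();
            while (it.hasNext()) {
                if ("basic".equalsIgnoreCase(it.next())) {
                    it.remove();
                }
            }
            System.out.println(entries); // [digest]
        }
    }
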
--- a/src/java.security.jgss/share/classes/javax/security/auth/kerberos/KerberosPrincipal.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/javax/security/auth/kerberos/KerberosPrincipal.java	Thu Nov 14 13:50:03 2019 +0000
@@ -82,6 +82,8 @@
 
     /**
      * Enterprise name (alias)
+     *
+     * @since 13
      */
     public static final int KRB_NT_ENTERPRISE = 10;
 
--- a/src/java.security.jgss/share/classes/sun/security/jgss/GSSNameImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/GSSNameImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,12 @@
 import java.util.HashSet;
 import java.util.Arrays;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import sun.security.util.ObjectIdentifier;
 import sun.security.util.DerInputStream;
 import sun.security.util.DerOutputStream;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * This is the implementation class for GSSName. Conceptually the
  * GSSName is a container with mechanism specific name elements. Each
@@ -227,13 +228,10 @@
         byte[] bytes = null;
 
         if (appName instanceof String) {
-            try {
-                bytes = ((String) appName).getBytes("UTF-8");
-            } catch (UnsupportedEncodingException e) {
-                // Won't happen
-            }
-        } else
+            bytes = ((String) appName).getBytes(UTF_8);
+        } else {
             bytes = (byte[]) appName;
+        }
 
         if ((bytes[pos++] != 0x04) ||
             (bytes[pos++] != 0x01))
@@ -320,21 +318,14 @@
             if (!this.appNameType.equals(that.appNameType)) {
                 return false;
             }
-            byte[] myBytes = null;
-            byte[] bytes = null;
-            try {
-                myBytes =
+            byte[] myBytes =
                     (this.appNameStr != null ?
-                     this.appNameStr.getBytes("UTF-8") :
+                     this.appNameStr.getBytes(UTF_8) :
                      this.appNameBytes);
-                bytes =
+            byte[] bytes =
                     (that.appNameStr != null ?
-                     that.appNameStr.getBytes("UTF-8") :
+                     that.appNameStr.getBytes(UTF_8) :
                      that.appNameBytes);
-            } catch (UnsupportedEncodingException e) {
-                // Won't happen
-            }
-
             return Arrays.equals(myBytes, bytes);
         }
 
--- a/src/java.security.jgss/share/classes/sun/security/jgss/krb5/Krb5NameElement.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/krb5/Krb5NameElement.java	Thu Nov 14 13:50:03 2019 +0000
@@ -32,12 +32,13 @@
 import sun.security.krb5.KrbException;
 
 import javax.security.auth.kerberos.ServicePermission;
-import java.io.UnsupportedEncodingException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.security.Provider;
 import java.util.Locale;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Implements the GSSNameSpi for the krb5 mechanism.
  *
@@ -51,9 +52,6 @@
     private String gssNameStr = null;
     private Oid gssNameType = null;
 
-    // XXX Move this concept into PrincipalName's asn1Encode() sometime
-    private static String CHAR_ENCODING = "UTF-8";
-
     private Krb5NameElement(PrincipalName principalName,
                             String gssNameStr,
                             Oid gssNameType) {
@@ -285,13 +283,7 @@
      */
     public byte[] export() throws GSSException {
         // XXX Apply the above constraints.
-        byte[] retVal = null;
-        try {
-            retVal = krb5PrincipalName.getName().getBytes(CHAR_ENCODING);
-        } catch (UnsupportedEncodingException e) {
-            // Can't happen
-        }
-        return retVal;
+        return krb5PrincipalName.getName().getBytes(UTF_8);
     }
 
     /**
--- a/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSNameElement.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/GSSNameElement.java	Thu Nov 14 13:50:03 2019 +0000
@@ -29,7 +29,6 @@
 import java.security.Provider;
 import java.security.Security;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import sun.security.krb5.Realm;
 import sun.security.jgss.GSSUtil;
 import sun.security.util.ObjectIdentifier;
--- a/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/NativeGSSFactory.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/jgss/wrapper/NativeGSSFactory.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 
 package sun.security.jgss.wrapper;
 
-import java.io.UnsupportedEncodingException;
 import java.security.Provider;
 import java.util.Vector;
 import org.ietf.jgss.*;
@@ -34,6 +33,8 @@
 import sun.security.jgss.GSSExceptionImpl;
 import sun.security.jgss.spi.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * JGSS plugin for generic mechanisms provided through native GSS framework.
  *
@@ -80,14 +81,9 @@
 
     public GSSNameSpi getNameElement(String nameStr, Oid nameType)
         throws GSSException {
-        try {
-            byte[] nameBytes =
-                (nameStr == null ? null : nameStr.getBytes("UTF-8"));
-            return new GSSNameElement(nameBytes, nameType, cStub);
-        } catch (UnsupportedEncodingException uee) {
-            // Shouldn't happen
-            throw new GSSExceptionImpl(GSSException.FAILURE, uee);
-        }
+        byte[] nameBytes =
+                (nameStr == null ? null : nameStr.getBytes(UTF_8));
+        return new GSSNameElement(nameBytes, nameType, cStub);
     }
 
     public GSSNameSpi getNameElement(byte[] name, Oid nameType)
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/ETypeInfo.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/ETypeInfo.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,13 @@
 
 package sun.security.krb5.internal;
 
-import sun.security.util.*;
+import java.io.IOException;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import sun.security.krb5.Asn1Exception;
-import java.io.IOException;
 import sun.security.krb5.internal.util.KerberosString;
+import sun.security.util.*;
 
 /**
  * Implements the ASN.1 ETYPE-INFO-ENTRY type.
@@ -99,7 +102,7 @@
                 // KerberosString in most implementations.
 
                 if (KerberosString.MSNAME) {
-                    this.salt = new String(saltBytes, "UTF8");
+                    this.salt = new String(saltBytes, UTF_8);
                 } else {
                     this.salt = new String(saltBytes);
                 }
@@ -129,7 +132,7 @@
         if (salt != null) {
             temp = new DerOutputStream();
             if (KerberosString.MSNAME) {
-                temp.putOctetString(salt.getBytes("UTF8"));
+                temp.putOctetString(salt.getBytes(UTF_8));
             } else {
                 temp.putOctetString(salt.getBytes());
             }
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/PAData.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/PAData.java	Thu Nov 14 13:50:03 2019 +0000
@@ -31,13 +31,15 @@
 
 package sun.security.krb5.internal;
 
-import sun.security.krb5.internal.crypto.EType;
-import sun.security.util.*;
-import sun.security.krb5.Asn1Exception;
 import java.io.IOException;
 import java.util.Vector;
 
+import static java.nio.charset.StandardCharsets.*;
+
+import sun.security.krb5.Asn1Exception;
 import sun.security.krb5.internal.util.KerberosString;
+import sun.security.krb5.internal.crypto.EType;
+import sun.security.util.*;
 
 /**
  * Implements the ASN.1 PA-DATA type.
@@ -263,7 +265,7 @@
             switch (p.getType()) {
                 case Krb5.PA_PW_SALT:
                     paPwSalt = new String(p.getValue(),
-                            KerberosString.MSNAME?"UTF8":"8859_1");
+                            KerberosString.MSNAME ? UTF_8 : ISO_8859_1);
                     break;
                 case Krb5.PA_ETYPE_INFO:
                     d = new DerValue(p.getValue());
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/PAForUserEnc.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/PAForUserEnc.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,8 @@
 import sun.security.util.DerOutputStream;
 import sun.security.util.DerValue;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * Implements the ASN.1 PA-FOR-USER type.
  *
@@ -163,25 +165,20 @@
      * 4. the string value of auth-package field
      */
     public byte[] getS4UByteArray() {
-        try {
-            ByteArrayOutputStream ba = new ByteArrayOutputStream();
-            ba.write(new byte[4]);
-            for (String s: name.getNameStrings()) {
-                ba.write(s.getBytes("UTF-8"));
-            }
-            ba.write(name.getRealm().toString().getBytes("UTF-8"));
-            ba.write(AUTH_PACKAGE.getBytes("UTF-8"));
-            byte[] output = ba.toByteArray();
-            int pnType = name.getNameType();
-            output[0] = (byte)(pnType & 0xff);
-            output[1] = (byte)((pnType>>8) & 0xff);
-            output[2] = (byte)((pnType>>16) & 0xff);
-            output[3] = (byte)((pnType>>24) & 0xff);
-            return output;
-        } catch (IOException ioe) {
-            // not possible
-            throw new AssertionError("Cannot write ByteArrayOutputStream", ioe);
+        ByteArrayOutputStream ba = new ByteArrayOutputStream();
+        ba.writeBytes(new byte[4]);
+        for (String s: name.getNameStrings()) {
+            ba.writeBytes(s.getBytes(UTF_8));
         }
+        ba.writeBytes(name.getRealm().toString().getBytes(UTF_8));
+        ba.writeBytes(AUTH_PACKAGE.getBytes(UTF_8));
+        byte[] output = ba.toByteArray();
+        int pnType = name.getNameType();
+        output[0] = (byte)(pnType & 0xff);
+        output[1] = (byte)((pnType>>8) & 0xff);
+        output[2] = (byte)((pnType>>16) & 0xff);
+        output[3] = (byte)((pnType>>24) & 0xff);
+        return output;
     }
 
     public String toString() {
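
getS4UByteArray above can drop its try/catch because ByteArrayOutputStream.writeBytes (JDK 11) declares no IOException, unlike the inherited write(byte[]). A sketch of the same pattern, patching the 4-byte name type in little-endian order; the name string and type value here are made up for illustration:

    import java.io.ByteArrayOutputStream;
    import static java.nio.charset.StandardCharsets.UTF_8;

    public class S4uHeaderDemo {
        public static void main(String[] args) {
            ByteArrayOutputStream ba = new ByteArrayOutputStream();
            // writeBytes (JDK 11+) throws nothing, unlike write(byte[]),
            // so no try/catch is needed around the whole assembly.
            ba.writeBytes(new byte[4]);            // placeholder for the name type
            ba.writeBytes("user".getBytes(UTF_8)); // hypothetical name string
            byte[] output = ba.toByteArray();
            int pnType = 1;                        // hypothetical name type
            // Patch the 4-byte type in little-endian order, as above.
            output[0] = (byte) (pnType & 0xff);
            output[1] = (byte) ((pnType >> 8) & 0xff);
            output[2] = (byte) ((pnType >> 16) & 0xff);
            output[3] = (byte) ((pnType >> 24) & 0xff);
            System.out.printf("%02x %02x %02x %02x%n",
                    output[0], output[1], output[2], output[3]); // 01 00 00 00
        }
    }
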
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/ccache/FileCredentialsCache.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/ccache/FileCredentialsCache.java	Thu Nov 14 13:50:03 2019 +0000
@@ -51,6 +51,8 @@
 import java.io.BufferedReader;
 import java.io.InputStreamReader;
 
+import static java.nio.charset.StandardCharsets.ISO_8859_1;
+
 /**
  * CredentialsCache stores credentials(tickets, session keys, etc) in a
  * semi-permanent store
@@ -594,7 +596,7 @@
 
             BufferedReader commandResult =
                 new BufferedReader
-                    (new InputStreamReader(p.getInputStream(), "8859_1"));
+                    (new InputStreamReader(p.getInputStream(), ISO_8859_1));
             String s1 = null;
             if ((command.length == 1) &&
                 (command[0].equals("/usr/bin/env"))) {
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/dk/AesDkCrypto.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/dk/AesDkCrypto.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,6 +43,8 @@
 import sun.security.krb5.internal.crypto.KeyUsage;
 import java.util.Arrays;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * This class provides the implementation of AES Encryption for Kerberos
  * as defined RFC 3962.
@@ -104,7 +106,7 @@
 
         byte[] saltUtf8 = null;
         try {
-            saltUtf8 = salt.getBytes("UTF-8");
+            saltUtf8 = salt.getBytes(UTF_8);
             return stringToKey(password, saltUtf8, s2kparams);
         } catch (Exception e) {
             return null;
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/dk/AesSha2DkCrypto.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/dk/AesSha2DkCrypto.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,8 @@
 import sun.security.krb5.internal.crypto.KeyUsage;
 import java.util.Arrays;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
  * This class provides the implementation of AES Encryption with
  * HMAC-SHA2 for Kerberos 5
@@ -107,7 +109,7 @@
 
         byte[] saltUtf8 = null;
         try {
-            saltUtf8 = salt.getBytes("UTF-8");
+            saltUtf8 = salt.getBytes(UTF_8);
             return stringToKey(password, saltUtf8, s2kparams);
         } catch (Exception e) {
             return null;
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/dk/DkCrypto.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/crypto/dk/DkCrypto.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
  */
 
 /*
@@ -33,7 +33,6 @@
 import javax.crypto.Cipher;
 import javax.crypto.Mac;
 import java.security.GeneralSecurityException;
-import java.io.UnsupportedEncodingException;
 import java.util.Arrays;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -45,6 +44,8 @@
 import sun.security.krb5.internal.crypto.KeyUsage;
 import sun.security.krb5.KrbCryptoException;
 
+import static java.nio.charset.StandardCharsets.*;
+
 /**
  * Implements Derive Key cryptography functionality as defined in RFC 3961.
  * http://www.ietf.org/rfc/rfc3961.txt
@@ -672,13 +673,11 @@
         }
     }
 
-// String.getBytes("UTF-8");
+// String.getBytes(UTF_8);
 // Do this instead of using String to avoid making password immutable
     static byte[] charToUtf8(char[] chars) {
-        Charset utf8 = Charset.forName("UTF-8");
-
         CharBuffer cb = CharBuffer.wrap(chars);
-        ByteBuffer bb = utf8.encode(cb);
+        ByteBuffer bb = UTF_8.encode(cb);
         int len = bb.limit();
         byte[] answer = new byte[len];
         bb.get(answer, 0, len);
@@ -686,10 +685,8 @@
     }
 
     static byte[] charToUtf16(char[] chars) {
-        Charset utf8 = Charset.forName("UTF-16LE");
-
         CharBuffer cb = CharBuffer.wrap(chars);
-        ByteBuffer bb = utf8.encode(cb);
+        ByteBuffer bb = UTF_16LE.encode(cb);
         int len = bb.limit();
         byte[] answer = new byte[len];
         bb.get(answer, 0, len);
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/ktab/KeyTabEntry.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/ktab/KeyTabEntry.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +33,8 @@
 
 import sun.security.krb5.*;
 import sun.security.krb5.internal.*;
-import java.io.UnsupportedEncodingException;
+
+import static java.nio.charset.StandardCharsets.ISO_8859_1;
 
 /**
  * This class represents a Key Table entry. Each entry contains the service principal of
@@ -83,17 +85,10 @@
         int totalPrincipalLength = 0;
         String[] names = service.getNameStrings();
         for (int i = 0; i < names.length; i++) {
-            try {
-                totalPrincipalLength += principalSize + names[i].getBytes("8859_1").length;
-            } catch (UnsupportedEncodingException exc) {
-            }
+            totalPrincipalLength += principalSize + names[i].getBytes(ISO_8859_1).length;
         }
 
-        int realmLen = 0;
-        try {
-            realmLen = realm.toString().getBytes("8859_1").length;
-        } catch (UnsupportedEncodingException exc) {
-        }
+        int realmLen = realm.toString().getBytes(ISO_8859_1).length;
 
         int size = principalComponentSize +  realmSize + realmLen
             + totalPrincipalLength + principalTypeSize
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/ktab/KeyTabOutputStream.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/ktab/KeyTabOutputStream.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +36,8 @@
 import java.io.IOException;
 import java.io.FileOutputStream;
 import java.io.OutputStream;
-import java.io.UnsupportedEncodingException;
+
+import static java.nio.charset.StandardCharsets.ISO_8859_1;
 
 /**
  * This class implements a buffered output stream. It is used for writing key table
@@ -68,21 +70,16 @@
         }
         else write16(comp_num);
 
-        byte[] realm = null;
-        try {
-            realm = entry.service.getRealmString().getBytes("8859_1");
-        } catch (UnsupportedEncodingException exc) {
-        }
-
+        byte[] realm = entry.service.getRealmString().getBytes(ISO_8859_1);
         write16(realm.length);
         write(realm);
+
         for (int i = 0; i < comp_num; i++) {
-            try {
-                write16(serviceNames[i].getBytes("8859_1").length);
-                write(serviceNames[i].getBytes("8859_1"));
-            } catch (UnsupportedEncodingException exc) {
-            }
+            byte[] serviceName = serviceNames[i].getBytes(ISO_8859_1);
+            write16(serviceName.length);
+            write(serviceName);
         }
+
         write32(entry.service.getNameType());
         //time is a long, but we only use 4 bytes to store the data.
         write32((int)(entry.timestamp.getTime()/1000));
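
The rewritten loop above factors getBytes(ISO_8859_1) into a local so each component is encoded once instead of twice. The keytab fields being emitted are length-prefixed byte strings; a hedged sketch of the same 16-bit-length-then-bytes layout using DataOutputStream (writeCountedString is an illustrative name, and real keytab byte order depends on the file version):

import java.io.DataOutputStream;
import java.io.IOException;
import static java.nio.charset.StandardCharsets.ISO_8859_1;

public class KeytabFieldSketch {
    // Mirror of the write16(length)/write(bytes) sequence above:
    // a 16-bit length followed by the ISO-8859-1 bytes of the field.
    static void writeCountedString(DataOutputStream out, String s) throws IOException {
        byte[] bytes = s.getBytes(ISO_8859_1);
        out.writeShort(bytes.length);  // big-endian here
        out.write(bytes);
    }
}
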
--- a/src/java.security.jgss/share/classes/sun/security/krb5/internal/util/KerberosString.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.jgss/share/classes/sun/security/krb5/internal/util/KerberosString.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,8 @@
 import sun.security.action.GetPropertyAction;
 import sun.security.util.DerValue;
 
+import static java.nio.charset.StandardCharsets.*;
+
 /**
  * Implements the ASN.1 KerberosString type.
  *
@@ -71,17 +73,17 @@
             throw new IOException(
                 "KerberosString's tag is incorrect: " + der.tag);
         }
-        s = new String(der.getDataBytes(), MSNAME?"UTF8":"ASCII");
+        s = new String(der.getDataBytes(), MSNAME ? UTF_8 : US_ASCII);
     }
 
     public String toString() {
         return s;
     }
 
-    public DerValue toDerValue() throws IOException {
+    public DerValue toDerValue() {
         // No need to cache the result since this method is
         // only called once.
         return new DerValue(DerValue.tag_GeneralString,
-                s.getBytes(MSNAME?"UTF8":"ASCII"));
+                s.getBytes(MSNAME ? UTF_8 : US_ASCII));
     }
 }
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/ClientFactoryImpl.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/ClientFactoryImpl.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,8 @@
 import javax.security.auth.callback.PasswordCallback;
 import javax.security.auth.callback.UnsupportedCallbackException;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
   * Client factory for EXTERNAL, CRAM-MD5, PLAIN.
   *
@@ -141,7 +143,7 @@
             String authId;
 
             if (pw != null) {
-                bytepw = new String(pw).getBytes("UTF8");
+                bytepw = new String(pw).getBytes(UTF_8);
                 pcb.clearPassword();
             } else {
                 bytepw = null;
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/CramMD5Client.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/CramMD5Client.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,8 @@
 import java.util.logging.Logger;
 import java.util.logging.Level;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
   * Implements the CRAM-MD5 SASL client-side mechanism.
   * (<A HREF="http://www.ietf.org/rfc/rfc2195.txt">RFC 2195</A>).
@@ -82,8 +84,8 @@
      *        data from the server.
      * @return A non-null byte array containing the response to be sent to
      *        the server.
-     * @throws SaslException If platform does not have MD5 support
-     * @throw IllegalStateException if this method is invoked more than once.
+     * @throws SaslException if platform does not have MD5 support
+     * @throws IllegalStateException if this method is invoked more than once.
      */
     public byte[] evaluateChallenge(byte[] challengeData)
         throws SaslException {
@@ -103,7 +105,7 @@
         try {
             if (logger.isLoggable(Level.FINE)) {
                 logger.log(Level.FINE, "CRAMCLNT01:Received challenge: {0}",
-                    new String(challengeData, "UTF8"));
+                    new String(challengeData, UTF_8));
             }
 
             String digest = HMAC_MD5(pw, challengeData);
@@ -118,13 +120,10 @@
 
             completed = true;
 
-            return resp.getBytes("UTF8");
+            return resp.getBytes(UTF_8);
         } catch (java.security.NoSuchAlgorithmException e) {
             aborted = true;
             throw new SaslException("MD5 algorithm not available on platform", e);
-        } catch (java.io.UnsupportedEncodingException e) {
-            aborted = true;
-            throw new SaslException("UTF8 not available on platform", e);
         }
     }
 }
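
For reference, the digest exchanged above is HMAC-MD5 over the server challenge, keyed by the password, hex-encoded and appended to the username after a space (RFC 2195). A compact sketch using the JCE "HmacMD5" Mac rather than the hand-rolled HMAC in CramMD5Base (names are illustrative):

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import static java.nio.charset.StandardCharsets.UTF_8;

public class CramMd5Sketch {
    // response = username SP lowercase-hex(HMAC-MD5(password, challenge))
    static byte[] response(String username, byte[] password, byte[] challenge)
            throws NoSuchAlgorithmException, InvalidKeyException {
        Mac hmac = Mac.getInstance("HmacMD5");
        hmac.init(new SecretKeySpec(password, "HmacMD5"));
        StringBuilder hex = new StringBuilder();
        for (byte b : hmac.doFinal(challenge)) {
            hex.append(String.format("%02x", b));
        }
        return (username + " " + hex).getBytes(UTF_8);
    }
}
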
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/CramMD5Server.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/CramMD5Server.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,15 +25,15 @@
 
 package com.sun.security.sasl;
 
+import java.io.IOException;
+import java.security.NoSuchAlgorithmException;
+import java.util.logging.Level;
+import java.util.Map;
+import java.util.Random;
 import javax.security.sasl.*;
 import javax.security.auth.callback.*;
-import java.util.Random;
-import java.util.Map;
-import java.io.IOException;
-import java.io.UnsupportedEncodingException;
-import java.security.NoSuchAlgorithmException;
 
-import java.util.logging.Level;
+import static java.nio.charset.StandardCharsets.UTF_8;
 
 /**
   * Implements the CRAM-MD5 SASL server-side mechanism.
@@ -130,7 +130,7 @@
                 logger.log(Level.FINE,
                     "CRAMSRV01:Generated challenge: {0}", challengeStr);
 
-                challengeData = challengeStr.getBytes("UTF8");
+                challengeData = challengeStr.getBytes(UTF_8);
                 return challengeData.clone();
 
             } else {
@@ -138,7 +138,7 @@
                 if(logger.isLoggable(Level.FINE)) {
                     logger.log(Level.FINE,
                         "CRAMSRV02:Received response: {0}",
-                        new String(responseData, "UTF8"));
+                        new String(responseData, UTF_8));
                 }
 
                 // Extract username from response
@@ -154,7 +154,7 @@
                     throw new SaslException(
                         "CRAM-MD5: Invalid response; space missing");
                 }
-                String username = new String(responseData, 0, ulen, "UTF8");
+                String username = new String(responseData, 0, ulen, UTF_8);
 
                 logger.log(Level.FINE,
                     "CRAMSRV03:Extracted username: {0}", username);
@@ -177,7 +177,7 @@
                 for (int i = 0; i < pwChars.length; i++) {
                     pwChars[i] = 0;
                 }
-                pw = pwStr.getBytes("UTF8");
+                pw = pwStr.getBytes(UTF_8);
 
                 // Generate a keyed-MD5 digest from the user's password and
                 // original challenge.
@@ -190,7 +190,7 @@
                 clearPassword();
 
                 // Check whether digest is as expected
-                byte[] expectedDigest = digest.getBytes("UTF8");
+                byte[] expectedDigest = digest.getBytes(UTF_8);
                 int digestLen = responseData.length - ulen - 1;
                 if (expectedDigest.length != digestLen) {
                     aborted = true;
@@ -222,9 +222,6 @@
                 completed = true;
                 return null;
             }
-        } catch (UnsupportedEncodingException e) {
-            aborted = true;
-            throw new SaslException("UTF8 not available on platform", e);
         } catch (NoSuchAlgorithmException e) {
             aborted = true;
             throw new SaslException("MD5 algorithm not available on platform", e);
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/ExternalClient.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/ExternalClient.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,8 @@
 
 import javax.security.sasl.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
   * Implements the EXTERNAL SASL client mechanism.
   * (<A HREF="http://www.ietf.org/rfc/rfc2222.txt">RFC 2222</A>).
@@ -43,17 +45,10 @@
      * Constructs an External mechanism with optional authorization ID.
      *
      * @param authorizationID If non-null, used to specify authorization ID.
-     * @throws SaslException if cannot convert authorizationID into UTF-8
-     *     representation.
      */
-    ExternalClient(String authorizationID) throws SaslException {
+    ExternalClient(String authorizationID) {
         if (authorizationID != null) {
-            try {
-                username = authorizationID.getBytes("UTF8");
-            } catch (java.io.UnsupportedEncodingException e) {
-                throw new SaslException("Cannot convert " + authorizationID +
-                    " into UTF-8", e);
-            }
+            username = authorizationID.getBytes(UTF_8);
         } else {
             username = new byte[0];
         }
@@ -88,10 +83,9 @@
      *
      * @param challengeData Ignored.
      * @return The possible empty initial response.
-     * @throws SaslException If authentication has already been called.
+     * @throws IllegalStateException If authentication has already been called.
      */
-    public byte[] evaluateChallenge(byte[] challengeData)
-        throws SaslException {
+    public byte[] evaluateChallenge(byte[] challengeData) {
         if (completed) {
             throw new IllegalStateException(
                 "EXTERNAL authentication already completed");
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/PlainClient.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/PlainClient.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,8 @@
 
 import javax.security.sasl.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 /**
   * Implements the PLAIN SASL client mechanism.
   * (<A
@@ -89,43 +91,37 @@
      *
      * @param challengeData Ignored
      * @return A non-null byte array containing the response to be sent to the server.
-     * @throws SaslException If cannot encode ids in UTF-8
-     * @throw IllegalStateException if authentication already completed
+     * @throws IllegalStateException if authentication already completed
      */
-    public byte[] evaluateChallenge(byte[] challengeData) throws SaslException {
+    public byte[] evaluateChallenge(byte[] challengeData) {
         if (completed) {
             throw new IllegalStateException(
                 "PLAIN authentication already completed");
         }
         completed = true;
+        byte[] authz = (authorizationID != null)
+            ? authorizationID.getBytes(UTF_8)
+            : null;
+        byte[] auth = authenticationID.getBytes(UTF_8);
 
-        try {
-            byte[] authz = (authorizationID != null)?
-                authorizationID.getBytes("UTF8") :
-                null;
-            byte[] auth = authenticationID.getBytes("UTF8");
-
-            byte[] answer = new byte[pw.length + auth.length + 2 +
+        byte[] answer = new byte[pw.length + auth.length + 2 +
                 (authz == null ? 0 : authz.length)];
 
-            int pos = 0;
-            if (authz != null) {
-                System.arraycopy(authz, 0, answer, 0, authz.length);
-                pos = authz.length;
-            }
-            answer[pos++] = SEP;
-            System.arraycopy(auth, 0, answer, pos, auth.length);
+        int pos = 0;
+        if (authz != null) {
+            System.arraycopy(authz, 0, answer, 0, authz.length);
+            pos = authz.length;
+        }
+        answer[pos++] = SEP;
+        System.arraycopy(auth, 0, answer, pos, auth.length);
 
-            pos += auth.length;
-            answer[pos++] = SEP;
-
-            System.arraycopy(pw, 0, answer, pos, pw.length);
+        pos += auth.length;
+        answer[pos++] = SEP;
 
-            clearPassword();
-            return answer;
-        } catch (java.io.UnsupportedEncodingException e) {
-            throw new SaslException("Cannot get UTF-8 encoding of ids", e);
-        }
+        System.arraycopy(pw, 0, answer, pos, pw.length);
+
+        clearPassword();
+        return answer;
     }
 
     /**
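
The straight-line encoding that replaces the try/catch above builds the PLAIN message, which per RFC 4616 is message = [authzid] NUL authcid NUL passwd, all UTF-8. A self-contained sketch of the same layout (class and method names are illustrative):

import static java.nio.charset.StandardCharsets.UTF_8;

public class PlainResponseSketch {
    private static final byte SEP = 0;  // US-ASCII NUL

    static byte[] response(String authzid, String authcid, byte[] passwd) {
        byte[] authz = (authzid != null) ? authzid.getBytes(UTF_8) : new byte[0];
        byte[] auth = authcid.getBytes(UTF_8);
        byte[] answer = new byte[authz.length + 1 + auth.length + 1 + passwd.length];
        int pos = 0;
        System.arraycopy(authz, 0, answer, pos, authz.length);
        pos += authz.length;
        answer[pos++] = SEP;
        System.arraycopy(auth, 0, answer, pos, auth.length);
        pos += auth.length;
        answer[pos++] = SEP;
        System.arraycopy(passwd, 0, answer, pos, passwd.length);
        return answer;
    }
}
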
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/digest/DigestMD5Base.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/digest/DigestMD5Base.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,17 +25,15 @@
 
 package com.sun.security.sasl.digest;
 
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.nio.charset.Charset;
 import java.util.Map;
 import java.util.Arrays;
 import java.util.List;
 import java.util.logging.Level;
-import java.math.BigInteger;
 import java.util.Random;
-
-import java.io.ByteArrayOutputStream;
-import java.io.UnsupportedEncodingException;
-import java.io.IOException;
-
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.security.InvalidKeyException;
@@ -43,6 +41,8 @@
 import java.security.spec.InvalidKeySpecException;
 import java.security.InvalidAlgorithmParameterException;
 
+import static java.nio.charset.StandardCharsets.*;
+
 import javax.crypto.Cipher;
 import javax.crypto.SecretKey;
 import javax.crypto.Mac;
@@ -54,11 +54,11 @@
 import javax.crypto.spec.DESKeySpec;
 import javax.crypto.spec.DESedeKeySpec;
 
+import javax.security.auth.callback.CallbackHandler;
 import javax.security.sasl.*;
+
 import com.sun.security.sasl.util.AbstractSaslImpl;
 
-import javax.security.auth.callback.CallbackHandler;
-
 /**
  * Utility class for DIGEST-MD5 mechanism. Provides utility methods
  * and contains two inner classes which implement the SecurityCtx
@@ -151,7 +151,7 @@
     protected String negotiatedQop;
     protected String negotiatedRealm;
     protected boolean useUTF8 = false;
-    protected String encoding = "8859_1";  // default unless server specifies utf-8
+    protected Charset encoding = ISO_8859_1;  // default unless server specifies utf-8
 
     protected String digestUri;
     protected String authzid;       // authzid or canonicalized authzid
@@ -384,8 +384,7 @@
      * @param a non-null byte array
      * @return a non-null String containing the HEX value
      */
-    protected byte[] binaryToHex(byte[] digest) throws
-    UnsupportedEncodingException {
+    protected byte[] binaryToHex(byte[] digest) {
 
         StringBuilder digestString = new StringBuilder();
 
@@ -405,26 +404,21 @@
      * if all chars in string are within the 8859_1 (Latin 1) encoding range.
      *
      * @param a non-null String
-     * @return a non-nuill byte array containing the correct character encoding
+     * @return a non-null byte array containing the correct character encoding
      * for username, password or realm.
      */
-    protected byte[] stringToByte_8859_1(String str) throws SaslException {
+    protected byte[] stringToByte_8859_1(String str) {
 
         char[] buffer = str.toCharArray();
 
-        try {
-            if (useUTF8) {
-                for( int i = 0; i< buffer.length; i++ ) {
-                    if( buffer[i] > '\u00FF' ) {
-                        return str.getBytes("UTF8");
-                    }
+        if (useUTF8) {
+            for (int i = 0; i < buffer.length; i++) {
+                if (buffer[i] > '\u00FF') {
+                    return str.getBytes(UTF_8);
                 }
             }
-            return str.getBytes("8859_1");
-        } catch (UnsupportedEncodingException e) {
-            throw new SaslException(
-                "cannot encode string in UTF8 or 8859-1 (Latin-1)", e);
         }
+        return str.getBytes(ISO_8859_1);
     }
 
     protected static byte[] getPlatformCiphers() {
@@ -461,8 +455,6 @@
      * @return A non-null byte array containing the response-value.
      * @throws NoSuchAlgorithmException if the platform does not have MD5
      * digest support.
-     * @throws UnsupportedEncodingException if a an error occurs
-     * encoding a string into either Latin-1 or UTF-8.
      * @throws IOException if an error occurs writing to the output
      * byte array buffer.
      */
@@ -478,7 +470,6 @@
         int nonceCount,
         byte[] authzidValue
         ) throws NoSuchAlgorithmException,
-            UnsupportedEncodingException,
             IOException {
 
         MessageDigest md5 = MessageDigest.getInstance("MD5");
@@ -845,14 +836,9 @@
             try {
                 generateIntegrityKeyPair(clientMode);
 
-            } catch (UnsupportedEncodingException e) {
-                throw new SaslException(
-                    "DIGEST-MD5: Error encoding strings into UTF-8", e);
-
             } catch (IOException e) {
                 throw new SaslException("DIGEST-MD5: Error accessing buffers " +
                     "required to create integrity key pairs", e);
-
             } catch (NoSuchAlgorithmException e) {
                 throw new SaslException("DIGEST-MD5: Unsupported digest " +
                     "algorithm used to create integrity key pairs", e);
@@ -866,16 +852,13 @@
          * Generate client-server, server-client key pairs for DIGEST-MD5
          * integrity checking.
          *
-         * @throws UnsupportedEncodingException if the UTF-8 encoding is not
-         * supported on the platform.
          * @throws IOException if an error occurs when writing to or from the
          * byte array output buffers.
          * @throws NoSuchAlgorithmException if the MD5 message digest algorithm
          * cannot be loaded.
          */
         private void generateIntegrityKeyPair(boolean clientMode)
-            throws UnsupportedEncodingException, IOException,
-                NoSuchAlgorithmException {
+            throws IOException, NoSuchAlgorithmException {
 
             byte[] cimagic = CLIENT_INT_MAGIC.getBytes(encoding);
             byte[] simagic = SVR_INT_MAGIC.getBytes(encoding);
@@ -1130,11 +1113,6 @@
 
             } catch (SaslException e) {
                 throw e;
-
-            } catch (UnsupportedEncodingException e) {
-                throw new SaslException(
-                    "DIGEST-MD5: Error encoding string value into UTF-8", e);
-
             } catch (IOException e) {
                 throw new SaslException("DIGEST-MD5: Error accessing " +
                     "buffers required to generate cipher keys", e);
@@ -1152,14 +1130,11 @@
          * byte array output buffers.
          * @throws NoSuchAlgorithmException if the MD5 message digest algorithm
          * cannot be loaded.
-         * @throws UnsupportedEncodingException if an UTF-8 encoding is not
-         * supported on the platform.
-         * @throw SaslException if an error occurs initializing the keys and
+         * @throws SaslException if an error occurs initializing the keys and
          * IVs for the chosen cipher.
          */
         private void generatePrivacyKeyPair(boolean clientMode)
-            throws IOException, UnsupportedEncodingException,
-            NoSuchAlgorithmException, SaslException {
+            throws IOException, NoSuchAlgorithmException, SaslException {
 
             byte[] ccmagic = CLIENT_CONF_MAGIC.getBytes(encoding);
             byte[] scmagic = SVR_CONF_MAGIC.getBytes(encoding);
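
Typing the negotiated encoding field as Charset rather than as a name string is what lets every getBytes(encoding) call in this class shed its UnsupportedEncodingException path. A small sketch of the idiom, with illustrative names:

import java.nio.charset.Charset;
import static java.nio.charset.StandardCharsets.ISO_8859_1;
import static java.nio.charset.StandardCharsets.UTF_8;

public class NegotiatedEncodingSketch {
    // Latin-1 until the peer advertises charset=utf-8, as in DigestMD5Base.
    private Charset encoding = ISO_8859_1;

    void onCharsetDirective(String value) {
        if ("utf-8".equals(value)) {
            encoding = UTF_8;
        }
    }

    // No checked exception: the Charset is already resolved.
    byte[] encode(String s) {
        return s.getBytes(encoding);
    }
}
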
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/digest/DigestMD5Client.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/digest/DigestMD5Client.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,14 +28,14 @@
 import java.security.NoSuchAlgorithmException;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
 import java.util.StringTokenizer;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Arrays;
+import java.util.logging.Level;
 
-import java.util.logging.Level;
+import static java.nio.charset.StandardCharsets.UTF_8;
 
 import javax.security.sasl.*;
 import javax.security.auth.callback.CallbackHandler;
@@ -155,13 +155,7 @@
         // authzID can only be encoded in UTF8 - RFC 2222
         if (authzid != null) {
             this.authzid = authzid;
-            try {
-                authzidBytes = authzid.getBytes("UTF8");
-
-            } catch (UnsupportedEncodingException e) {
-                throw new SaslException(
-                    "DIGEST-MD5: Error encoding authzid value into UTF-8", e);
-            }
+            authzidBytes = authzid.getBytes(UTF_8);
         }
 
         if (props != null) {
@@ -272,7 +266,7 @@
     * digest challenge format is detected.
     */
     private void processChallenge(byte[][] challengeVal, List<byte[]> realmChoices)
-        throws SaslException, UnsupportedEncodingException {
+        throws SaslException {
 
         /* CHARSET: optional at most once */
         if (challengeVal[CHARSET] != null) {
@@ -281,7 +275,7 @@
                     "violation. Unrecognised charset value: " +
                     new String(challengeVal[CHARSET]));
             } else {
-                encoding = "UTF8";
+                encoding = UTF_8;
                 useUTF8 = true;
             }
         }
--- a/src/java.security.sasl/share/classes/com/sun/security/sasl/digest/DigestMD5Server.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/java.security.sasl/share/classes/com/sun/security/sasl/digest/DigestMD5Server.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,10 +25,9 @@
 
 package com.sun.security.sasl.digest;
 
-import java.security.NoSuchAlgorithmException;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
-import java.io.UnsupportedEncodingException;
+import java.security.NoSuchAlgorithmException;
 import java.util.StringTokenizer;
 import java.util.ArrayList;
 import java.util.List;
@@ -40,6 +39,8 @@
 import javax.security.sasl.*;
 import javax.security.auth.callback.*;
 
+import static java.nio.charset.StandardCharsets.*;
+
 /**
   * An implementation of the DIGEST-MD5 server SASL mechanism.
   * (<a href="http://www.ietf.org/rfc/rfc2831.txt">RFC 2831</a>)
@@ -171,7 +172,7 @@
             }
         }
 
-        encoding = (useUTF8 ? "UTF8" : "8859_1");
+        encoding = (useUTF8 ? UTF_8 : ISO_8859_1);
 
         // By default, use server name as realm
         if (serverRealms.isEmpty()) {
@@ -229,9 +230,6 @@
 
                 step = 3;
                 return challenge;
-            } catch (UnsupportedEncodingException e) {
-                throw new SaslException(
-                    "DIGEST-MD5: Error encoding challenge", e);
             } catch (IOException e) {
                 throw new SaslException(
                     "DIGEST-MD5: Error generating challenge", e);
@@ -247,11 +245,6 @@
                 byte[][] responseVal = parseDirectives(response, DIRECTIVE_KEY,
                     null, REALM);
                 challenge = validateClientResponse(responseVal);
-            } catch (SaslException e) {
-                throw e;
-            } catch (UnsupportedEncodingException e) {
-                throw new SaslException(
-                    "DIGEST-MD5: Error validating client response", e);
             } finally {
                 step = 0;  // Set to invalid state
             }
@@ -298,7 +291,7 @@
      *        auth-param        = token "=" ( token | quoted-string )
      */
     private byte[] generateChallenge(List<String> realms, String qopStr,
-        String cipherStr) throws UnsupportedEncodingException, IOException {
+        String cipherStr) throws IOException {
         ByteArrayOutputStream out = new ByteArrayOutputStream();
 
         // Realms (>= 0)
@@ -389,7 +382,7 @@
      * @return response-value ('rspauth') for client to validate
      */
     private byte[] validateClientResponse(byte[][] responseVal)
-        throws SaslException, UnsupportedEncodingException {
+        throws SaslException {
 
         /* CHARSET: optional at most once */
         if (responseVal[CHARSET] != null) {
--- a/src/jdk.compiler/share/classes/com/sun/source/tree/CaseTree.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/source/tree/CaseTree.java	Thu Nov 14 13:50:03 2019 +0000
@@ -51,29 +51,18 @@
      * {@code null} if this is the default case.
      * If this case has multiple labels, returns the first label.
      * @return the expression for the case, or null
+     * @deprecated Please use {@link #getExpressions()}.
      */
+    @Deprecated
     ExpressionTree getExpression();
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * Returns the labels for this case.
      * For default case, returns an empty list.
      *
      * @return labels for this case
      * @since 12
-     *
-     * @preview This method is modeling a case with multiple labels,
-     * which is part of a preview feature and may be removed
-     * if the preview feature is removed.
      */
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
     List<? extends ExpressionTree> getExpressions();
 
     /**
@@ -86,14 +75,6 @@
     List<? extends StatementTree> getStatements();
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * For case with kind {@linkplain CaseKind#RULE},
      * returns the statement or expression after the arrow.
      * Returns {@code null} for case with kind
@@ -102,40 +83,21 @@
      * @return case value or null
      * @since 12
      */
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
     public default Tree getBody() {
         return null;
     }
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * Returns the kind of this case.
      *
      * @return the kind of this case
      * @since 12
      */
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     public default CaseKind getCaseKind() {
         return CaseKind.STATEMENT;
     }
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This enum is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * The syntactic form of this case:
      * <ul>
      *     <li>STATEMENT: {@code case <expression>: <statements>}</li>
@@ -144,8 +106,6 @@
      *
      * @since 12
      */
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     public enum CaseKind {
         /**
          * Case is in the form: {@code case <expression>: <statements>}.
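
With switch expressions a standard feature in JDK 14, CaseKind and getExpressions() lose their preview markers above. The two syntactic forms the enum distinguishes, as plain Java (the method bodies are illustrative):

public class CaseKindDemo {
    // RULE form: case <expression> -> <action>
    static String describe(int day) {
        return switch (day) {
            case 6, 7 -> "weekend";
            default -> "weekday";
        };
    }

    // STATEMENT form: case <expression>: <statements>
    static String describeOld(int day) {
        switch (day) {
            case 6:
            case 7:
                return "weekend";
            default:
                return "weekday";
        }
    }
}
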
--- a/src/jdk.compiler/share/classes/com/sun/source/tree/SwitchExpressionTree.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/source/tree/SwitchExpressionTree.java	Thu Nov 14 13:50:03 2019 +0000
@@ -28,14 +28,6 @@
 import java.util.List;
 
 /**
- * {@preview Associated with switch expressions, a preview feature of
- *           the Java language.
- *
- *           This interface is associated with <i>switch expressions</i>, a preview
- *           feature of the Java language. Preview features
- *           may be removed in a future release, or upgraded to permanent
- *           features of the Java language.}
- *
  * A tree node for a {@code switch} expression.
  *
  * For example:
@@ -49,7 +41,6 @@
  *
  * @since 12
  */
-@jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
 public interface SwitchExpressionTree extends ExpressionTree {
     /**
      * Returns the expression for the {@code switch} expression.
--- a/src/jdk.compiler/share/classes/com/sun/source/tree/Tree.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/source/tree/Tree.java	Thu Nov 14 13:50:03 2019 +0000
@@ -240,20 +240,10 @@
         SWITCH(SwitchTree.class),
 
         /**
-         * {@preview Associated with switch expressions, a preview feature of
-         *           the Java language.
-         *
-         *           This enum constant is associated with <i>switch expressions</i>, a preview
-         *           feature of the Java language. Preview features
-         *           may be removed in a future release, or upgraded to permanent
-         *           features of the Java language.}
-         *
          * Used for instances of {@link SwitchExpressionTree}.
          *
          * @since 12
          */
-        @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-        @SuppressWarnings("preview")
         SWITCH_EXPRESSION(SwitchExpressionTree.class),
 
         /**
@@ -662,20 +652,10 @@
         OTHER(null),
 
         /**
-         * {@preview Associated with switch expressions, a preview feature of
-         *           the Java language.
-         *
-         *           This enum constant is associated with <i>switch expressions</i>, a preview
-         *           feature of the Java language. Preview features
-         *           may be removed in a future release, or upgraded to permanent
-         *           features of the Java language.}
-         *
          * Used for instances of {@link YieldTree}.
          *
          * @since 13
          */
-        @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-        @SuppressWarnings("preview")
         YIELD(YieldTree.class);
 
 
--- a/src/jdk.compiler/share/classes/com/sun/source/tree/TreeVisitor.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/source/tree/TreeVisitor.java	Thu Nov 14 13:50:03 2019 +0000
@@ -354,14 +354,6 @@
     R visitSwitch(SwitchTree node, P p);
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * Visits a SwitchExpressionTree node.
      *
      * @param node the node being visited
@@ -369,8 +361,6 @@
      * @return a result value
      * @since 12
      */
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     R visitSwitchExpression(SwitchExpressionTree node, P p);
 
     /**
@@ -560,21 +550,11 @@
     R visitOther(Tree node, P p);
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * Visits a YieldTree node.
      * @param node the node being visited
      * @param p a parameter value
      * @return a result value
      * @since 13
      */
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     R visitYield(YieldTree node, P p);
 }
--- a/src/jdk.compiler/share/classes/com/sun/source/tree/YieldTree.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/source/tree/YieldTree.java	Thu Nov 14 13:50:03 2019 +0000
@@ -26,14 +26,6 @@
 package com.sun.source.tree;
 
 /**
- * {@preview Associated with switch expressions, a preview feature of
- *           the Java language.
- *
- *           This method is associated with <i>switch expressions</i>, a preview
- *           feature of the Java language. Preview features
- *           may be removed in a future release, or upgraded to permanent
- *           features of the Java language.}
- *
  * A tree node for a {@code yield} statement.
  *
  * For example:
@@ -45,7 +37,6 @@
  *
  * @since 13
  */
-@jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
 public interface YieldTree extends StatementTree {
 
     /**
--- a/src/jdk.compiler/share/classes/com/sun/source/util/SimpleTreeVisitor.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/source/util/SimpleTreeVisitor.java	Thu Nov 14 13:50:03 2019 +0000
@@ -264,14 +264,6 @@
     }
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * {@inheritDoc} This implementation calls {@code defaultAction}.
      *
      * @param node {@inheritDoc}
@@ -279,8 +271,6 @@
      * @return  the result of {@code defaultAction}
      */
     @Override
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     public R visitSwitchExpression(SwitchExpressionTree node, P p) {
         return defaultAction(node, p);
     }
@@ -794,8 +784,6 @@
      * @return  the result of {@code defaultAction}
      */
     @Override
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     public R visitYield(YieldTree node, P p) {
         return defaultAction(node, p);
     }
--- a/src/jdk.compiler/share/classes/com/sun/source/util/TreeScanner.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/source/util/TreeScanner.java	Thu Nov 14 13:50:03 2019 +0000
@@ -334,14 +334,6 @@
     }
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * {@inheritDoc} This implementation scans the children in left to right order.
      *
      * @param node  {@inheritDoc}
@@ -349,8 +341,6 @@
      * @return the result of scanning
      */
     @Override
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     public R visitSwitchExpression(SwitchExpressionTree node, P p) {
         R r = scan(node.getExpression(), p);
         r = scanAndReduce(node.getCases(), p, r);
@@ -365,7 +355,6 @@
      * @return the result of scanning
      */
     @Override
-    @SuppressWarnings("preview")
     public R visitCase(CaseTree node, P p) {
         R r = scan(node.getExpressions(), p);
         if (node.getCaseKind() == CaseTree.CaseKind.RULE)
@@ -938,14 +927,6 @@
     }
 
     /**
-     * {@preview Associated with switch expressions, a preview feature of
-     *           the Java language.
-     *
-     *           This method is associated with <i>switch expressions</i>, a preview
-     *           feature of the Java language. Preview features
-     *           may be removed in a future release, or upgraded to permanent
-     *           features of the Java language.}
-     *
      * {@inheritDoc} This implementation returns {@code null}.
      *
      * @param node  {@inheritDoc}
@@ -953,8 +934,6 @@
      * @return the result of scanning
      */
     @Override
-    @jdk.internal.PreviewFeature(feature=jdk.internal.PreviewFeature.Feature.SWITCH_EXPRESSIONS)
-    @SuppressWarnings("preview")
     public R visitYield(YieldTree node, P p) {
         return scan(node.getValue(), p);
     }
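
visitYield and visitSwitchExpression are now ordinary visitor methods, so clients can override them without @SuppressWarnings("preview"). A minimal client sketch (YieldCounter is an illustrative name; a null result from scan means no yields were found):

import com.sun.source.tree.YieldTree;
import com.sun.source.util.TreeScanner;

public class YieldCounter extends TreeScanner<Integer, Void> {
    // Counts yield statements under a tree, e.g.:
    //   Integer n = new YieldCounter().scan(compilationUnit, null);
    @Override
    public Integer visitYield(YieldTree node, Void p) {
        Integer inner = super.visitYield(node, p);
        return 1 + (inner == null ? 0 : inner);
    }

    @Override
    public Integer reduce(Integer r1, Integer r2) {
        return (r1 == null ? 0 : r1) + (r2 == null ? 0 : r2);
    }
}
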
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Preview.java	Thu Nov 14 13:50:03 2019 +0000
@@ -165,10 +165,7 @@
      * @return true, if given feature is a preview feature.
      */
     public boolean isPreview(Feature feature) {
-        if (feature == Feature.SWITCH_EXPRESSION ||
-            feature == Feature.SWITCH_MULTIPLE_CASE_LABELS ||
-            feature == Feature.SWITCH_RULE ||
-            feature == Feature.TEXT_BLOCKS)
+        if (feature == Feature.TEXT_BLOCKS)
             return true;
         //Note: this is a backdoor that optionally treats all features as 'preview' (for testing).
         //When real preview features are added, this method can be implemented to return 'true'
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java	Thu Nov 14 13:50:03 2019 +0000
@@ -84,7 +84,7 @@
     /** 1.11 local-variable syntax for lambda parameters */
     JDK11("11"),
 
-    /** 12, no language features; switch expression were in preview */
+    /** 12, no language features; switch expression in preview */
     JDK12("12"),
 
     /**
@@ -94,8 +94,7 @@
     JDK13("13"),
 
     /**
-     * 14 covers the to be determined language features that will be
-     * added in JDK 14.
+     * 14, switch expressions
      */
     JDK14("14");
 
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symtab.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Symtab.java	Thu Nov 14 13:50:03 2019 +0000
@@ -785,6 +785,7 @@
         unnamedPackage.modle = module;
         //we cannot use a method reference below, as initialCompleter might be null now
         unnamedPackage.completer = s -> initialCompleter.complete(s);
+        unnamedPackage.flags_field |= EXISTS;
         module.unnamedPackage = unnamedPackage;
     }
 
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/comp/Attr.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1467,7 +1467,6 @@
             // check that there are no duplicate case labels or default clauses.
             Set<Object> labels = new HashSet<>(); // The set of case labels.
             boolean hasDefault = false;      // Is there a default label?
-            @SuppressWarnings("preview")
             CaseTree.CaseKind caseKind = null;
             boolean wasError = false;
             for (List<JCCase> l = cases; l.nonEmpty(); l = l.tail) {
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/parser/JavacParser.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1432,7 +1432,6 @@
         }
         List<JCStatement> stats = null;
         JCTree body = null;
-        @SuppressWarnings("preview")
         CaseTree.CaseKind kind;
         switch (token.kind) {
             case ARROW:
@@ -2897,7 +2896,6 @@
                 nextToken();
                 checkSourceLevel(Feature.SWITCH_MULTIPLE_CASE_LABELS);
             };
-            @SuppressWarnings("preview")
             CaseTree.CaseKind caseKind;
             JCTree body = null;
             if (token.kind == ARROW) {
@@ -2922,7 +2920,6 @@
         }
         case DEFAULT: {
             nextToken();
-            @SuppressWarnings("preview")
             CaseTree.CaseKind caseKind;
             JCTree body = null;
             if (token.kind == ARROW) {
@@ -3300,7 +3297,7 @@
             if (allowYieldStatement) {
                 return true;
             } else if (shouldWarn) {
-                log.warning(pos, Warnings.RestrictedTypeNotAllowedPreview(name, Source.JDK13));
+                log.warning(pos, Warnings.RestrictedTypeNotAllowed(name, Source.JDK14));
             }
         }
         return false;
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/JCTree.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/JCTree.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1249,17 +1249,14 @@
     public static class JCCase extends JCStatement implements CaseTree {
         //as CaseKind is deprecated for removal (as it is part of a preview feature),
         //using indirection through these fields to avoid unnecessary @SuppressWarnings:
-        @SuppressWarnings("preview")
         public static final CaseKind STATEMENT = CaseKind.STATEMENT;
-        @SuppressWarnings("preview")
         public static final CaseKind RULE = CaseKind.RULE;
-        @SuppressWarnings("preview")
         public final CaseKind caseKind;
         public List<JCExpression> pats;
         public List<JCStatement> stats;
         public JCTree body;
         public boolean completesNormally;
-        protected JCCase(@SuppressWarnings("preview") CaseKind caseKind, List<JCExpression> pats,
+        protected JCCase(CaseKind caseKind, List<JCExpression> pats,
                          List<JCStatement> stats, JCTree body) {
             Assert.checkNonNull(pats);
             Assert.check(pats.isEmpty() || pats.head != null);
@@ -1273,21 +1270,17 @@
 
         @Override @DefinedBy(Api.COMPILER_TREE)
         public Kind getKind() { return Kind.CASE; }
-        @Override @DefinedBy(Api.COMPILER_TREE)
+        @Override @Deprecated @DefinedBy(Api.COMPILER_TREE)
         public JCExpression getExpression() { return pats.head; }
         @Override @DefinedBy(Api.COMPILER_TREE)
-        @SuppressWarnings("preview")
         public List<JCExpression> getExpressions() { return pats; }
         @Override @DefinedBy(Api.COMPILER_TREE)
-        @SuppressWarnings("preview")
         public List<JCStatement> getStatements() {
             return caseKind == CaseKind.STATEMENT ? stats : null;
         }
         @Override @DefinedBy(Api.COMPILER_TREE)
-        @SuppressWarnings("preview")
         public JCTree getBody() { return body; }
         @Override @DefinedBy(Api.COMPILER_TREE)
-        @SuppressWarnings("preview")
         public CaseKind getCaseKind() {
             return caseKind;
         }
@@ -1304,7 +1297,6 @@
     /**
      * A "switch ( ) { }" construction.
      */
-    @SuppressWarnings("preview")
     public static class JCSwitchExpression extends JCPolyExpression implements SwitchExpressionTree {
         public JCExpression selector;
         public List<JCCase> cases;
@@ -1585,7 +1577,6 @@
     /**
      * A break-with from a switch expression.
      */
-    @SuppressWarnings("preview")
     public static class JCYield extends JCStatement implements YieldTree {
         public JCExpression value;
         public JCTree target;
@@ -3104,7 +3095,7 @@
         JCLabeledStatement Labelled(Name label, JCStatement body);
         JCSwitch Switch(JCExpression selector, List<JCCase> cases);
         JCSwitchExpression SwitchExpression(JCExpression selector, List<JCCase> cases);
-        JCCase Case(@SuppressWarnings("preview") CaseTree.CaseKind caseKind, List<JCExpression> pat,
+        JCCase Case(CaseTree.CaseKind caseKind, List<JCExpression> pat,
                     List<JCStatement> stats, JCTree body);
         JCSynchronized Synchronized(JCExpression lock, JCBlock body);
         JCTry Try(JCBlock body, List<JCCatch> catchers, JCBlock finalizer);
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeCopier.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeCopier.java	Thu Nov 14 13:50:03 2019 +0000
@@ -144,7 +144,6 @@
     }
 
     @DefinedBy(Api.COMPILER_TREE)
-    @SuppressWarnings("preview")
     public JCTree visitYield(YieldTree node, P p) {
         JCYield t = (JCYield) node;
         JCExpression value = copy(t.value, p);
@@ -380,7 +379,6 @@
     }
 
     @DefinedBy(Api.COMPILER_TREE)
-    @SuppressWarnings("preview")
     public JCTree visitSwitchExpression(SwitchExpressionTree node, P p) {
         JCSwitchExpression t = (JCSwitchExpression) node;
         JCExpression selector = copy(t.selector, p);
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeMaker.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/tree/TreeMaker.java	Thu Nov 14 13:50:03 2019 +0000
@@ -274,7 +274,7 @@
         return tree;
     }
 
-    public JCCase Case(@SuppressWarnings("preview") CaseTree.CaseKind caseKind, List<JCExpression> pats,
+    public JCCase Case(CaseTree.CaseKind caseKind, List<JCExpression> pats,
                        List<JCStatement> stats, JCTree body) {
         JCCase tree = new JCCase(caseKind, pats, stats, body);
         tree.pos = pos;
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11KeyStore.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11KeyStore.java	Thu Nov 14 13:50:03 2019 +0000
@@ -31,7 +31,8 @@
 import java.io.OutputStream;
 import java.io.IOException;
 import java.io.ByteArrayInputStream;
-import java.io.UnsupportedEncodingException;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -2154,11 +2155,7 @@
         if (!printable) {
             return "0x" + Functions.toHexString(bytes);
         } else {
-            try {
-                return new String(bytes, "UTF-8");
-            } catch (UnsupportedEncodingException uee) {
-                return "0x" + Functions.toHexString(bytes);
-            }
+            return new String(bytes, UTF_8);
         }
     }
 
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11TlsPrfGenerator.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11TlsPrfGenerator.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,8 @@
 import javax.crypto.*;
 import javax.crypto.spec.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 import sun.security.internal.spec.TlsPrfParameterSpec;
 
 import static sun.security.pkcs11.TemplateManager.*;
@@ -167,7 +169,7 @@
             }
         }
 
-        byte[] label = P11Util.getBytesUTF8(spec.getLabel());
+        byte[] label = spec.getLabel().getBytes(UTF_8);
 
         if (mechanism == CKM_NSS_TLS_PRF_GENERAL) {
             Session session = null;
--- a/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Util.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.crypto.cryptoki/share/classes/sun/security/pkcs11/P11Util.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -143,14 +143,6 @@
         return b;
     }
 
-    static byte[] getBytesUTF8(String s) {
-        try {
-            return s.getBytes("UTF8");
-        } catch (java.io.UnsupportedEncodingException e) {
-            throw new RuntimeException(e);
-        }
-    }
-
     static byte[] sha1(byte[] data) {
         try {
             MessageDigest md = MessageDigest.getInstance("SHA-1");
--- a/src/jdk.crypto.ucrypto/solaris/classes/com/oracle/security/ucrypto/Config.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.crypto.ucrypto/solaris/classes/com/oracle/security/ucrypto/Config.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,15 +26,17 @@
 package com.oracle.security.ucrypto;
 
 import java.io.*;
-import static java.io.StreamTokenizer.*;
 import java.math.BigInteger;
 import java.util.*;
+import java.security.*;
 
-import java.security.*;
+import static java.io.StreamTokenizer.*;
+import static java.nio.charset.StandardCharsets.ISO_8859_1;
 
 import sun.security.action.GetPropertyAction;
 import sun.security.util.PropertyExpander;
 
+
 /**
  * Configuration container and file parsing.
  *
@@ -66,8 +68,8 @@
 
     Config(String filename) throws IOException {
         FileInputStream in = new FileInputStream(expand(filename));
-        reader = new BufferedReader(new InputStreamReader(in, "ISO-8859-1"));
-        parsedKeywords = new HashSet<String>();
+        reader = new BufferedReader(new InputStreamReader(in, ISO_8859_1));
+        parsedKeywords = new HashSet<>();
         st = new StreamTokenizer(reader);
         setupTokenizer();
         parse();
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/AdaptiveFreeList.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * @(#)AdaptiveFreeList.java
- *
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.util.Observable;
-import java.util.Observer;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.runtime.VM;
-import sun.jvm.hotspot.runtime.VMObject;
-import sun.jvm.hotspot.types.CIntegerField;
-import sun.jvm.hotspot.types.Type;
-import sun.jvm.hotspot.types.TypeDataBase;
-
-public class AdaptiveFreeList extends VMObject {
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-      public void update(Observable o, Object data) {
-        initialize(VM.getVM().getTypeDataBase());
-      }
-    });
-  }
-
-  private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("AdaptiveFreeList<FreeChunk>");
-    sizeField = type.getCIntegerField("_size");
-    countField = type.getCIntegerField("_count");
-    headerSize = type.getSize();
-  }
-
-  // Fields
-  private static CIntegerField sizeField;
-  private static CIntegerField countField;
-  private static long          headerSize;
-
-  //Constructor
-  public AdaptiveFreeList(Address address) {
-    super(address);
-  }
-
-  // Accessors
-  public long size() {
-    return sizeField.getValue(addr);
-  }
-
-  public long count() {
-    return  countField.getValue(addr);
-  }
-
-  public static long sizeOf() {
-    return headerSize;
-  }
-}
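
Each of the deleted SA mirror classes above and below follows one pattern: register an observer for VM initialization, resolve the native type from the type database, and cache field handles for the accessors. A minimal sketch of that pattern; the native type and field names here are hypothetical, not from the changeset:

    import java.util.Observable;
    import java.util.Observer;

    import sun.jvm.hotspot.debugger.Address;
    import sun.jvm.hotspot.runtime.VM;
    import sun.jvm.hotspot.runtime.VMObject;
    import sun.jvm.hotspot.types.CIntegerField;
    import sun.jvm.hotspot.types.Type;
    import sun.jvm.hotspot.types.TypeDataBase;

    public class ExampleMirror extends VMObject {
      private static CIntegerField valueField;

      static {
        // Defer the lookup until the SA has attached and read vmStructs.
        VM.registerVMInitializedObserver(new Observer() {
          public void update(Observable o, Object data) {
            initialize(VM.getVM().getTypeDataBase());
          }
        });
      }

      private static synchronized void initialize(TypeDataBase db) {
        Type type = db.lookupType("ExampleNativeType"); // hypothetical type
        valueField = type.getCIntegerField("_value");   // hypothetical field
      }

      public ExampleMirror(Address address) {
        super(address);
      }

      public long value() {
        return valueField.getValue(addr);
      }
    }
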
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSBitMap.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class CMSBitMap extends VMObject {
-  private static AddressField bmStartWordField;
-  private static CIntegerField bmWordSizeField;
-  private static CIntegerField shifterField;
-  //private static AddressField bmField;
-  private static long virtualSpaceFieldOffset;
-
-  public CMSBitMap(Address addr) {
-    super(addr);
-  }
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("CMSBitMap");
-    bmStartWordField = type.getAddressField("_bmStartWord");
-    bmWordSizeField = type.getCIntegerField("_bmWordSize");
-    shifterField = type.getCIntegerField("_shifter");
-    //bmField = type.getAddressField("_bm");
-    virtualSpaceFieldOffset = type.getField("_virtual_space").getOffset();
-  }
-  public void printAll() {
-    System.out.println("bmStartWord(): "+bmStartWord());
-    System.out.println("bmWordSize(): "+bmWordSize());
-    System.out.println("shifter(): "+shifter());
-  }
-
-  public Address bmStartWord() {
-    return bmStartWordField.getValue(addr);
-  }
-  public long bmWordSize() {
-    return bmWordSizeField.getValue(addr);
-  }
-  public long shifter() {
-    return shifterField.getValue(addr);
-  }
-  public VirtualSpace virtualSpace() {
-    return (VirtualSpace) VMObjectFactory.newObject(VirtualSpace.class, addr.addOffsetTo(virtualSpaceFieldOffset));
-  }
-
-  public BitMap bm() {
-    BitMap bitMap = new BitMap((int) (bmWordSize() >> shifter() ));
-    VirtualSpace vs = virtualSpace();
-    bitMap.set_map(vs.low());
-    return bitMap;
-  }
-
-  public Address getNextMarkedWordAddress(Address addr) {
-    Address endWord = bmStartWord().addOffsetTo(bmWordSize());
-    int nextOffset = bm().getNextOneOffset(heapWordToOffset(addr), heapWordToOffset(endWord) );
-    Address nextAddr = offsetToHeapWord(nextOffset);
-    return nextAddr;
-  }
-
-  int heapWordToOffset(Address addr) {
-    int temp = (int)addr.minus(bmStartWord()) / (int) VM.getVM().getAddressSize();
-    int ret_val = temp >> shifter();
-    return ret_val;
-  }
-
-  Address offsetToHeapWord(int offset) {
-    int temp = offset << shifter();
-    return bmStartWord().addOffsetTo(temp*VM.getVM().getAddressSize());
-  }
-
-  boolean isMarked(Address addr) {
-    BitMap bm = bm();
-    return bm.at(heapWordToOffset(addr));
-  }
-}
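
The deleted conversion helpers above implement standard mark-bitmap arithmetic: a heap word's bit index is its word distance from the bitmap's start, scaled down by a shift. A reduced sketch with assumed constants (values are illustrative only):

    class BitMapMathSketch {
        static final long BASE = 0x10000000L; // assumed bitmap start word
        static final int WORD_SIZE = 8;       // 64-bit heap words
        static final int SHIFTER = 0;         // one mark bit per heap word

        static long heapWordToOffset(long addr) {
            return ((addr - BASE) / WORD_SIZE) >> SHIFTER;
        }

        static long offsetToHeapWord(long offset) {
            return BASE + (offset << SHIFTER) * WORD_SIZE;
        }
    }
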
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSCollector.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class CMSCollector extends VMObject {
-  private static long markBitMapFieldOffset;
-
-  public CMSCollector(Address addr) {
-    super(addr);
-  }
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("CMSCollector");
-    markBitMapFieldOffset = type.getField("_markBitMap").getOffset();
-  }
-
-  //Accessing mark bitmap
-  public CMSBitMap markBitMap() {
-   return (CMSBitMap) VMObjectFactory.newObject(
-                                CMSBitMap.class,
-                                addr.addOffsetTo(markBitMapFieldOffset));
-  }
-
-  public long blockSizeUsingPrintezisBits(Address addr) {
-    CMSBitMap markBitMap = markBitMap();
-    long addressSize = VM.getVM().getAddressSize();
-    if ( markBitMap.isMarked(addr) &&  markBitMap.isMarked(addr.addOffsetTo(1*addressSize)) ) {
-      Address nextOneAddr = markBitMap.getNextMarkedWordAddress(addr.addOffsetTo(2*addressSize));
-      //return size in bytes
-      long size =  (nextOneAddr.addOffsetTo(1*addressSize)).minus(addr);
-      return size;
-    } else {
-      //missing Printezis marks
-      return -1;
-    }
-
-  }
-}
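
blockSizeUsingPrintezisBits above decodes a CMS convention: two consecutive mark bits at a block's start mean the collector recorded the block's end as the next marked word beyond them. A sketch of the probe over a plain BitSet, where bit indices stand in for heap-word addresses:

    import java.util.BitSet;

    class PrintezisSketch {
        // Returns the block size in words, or -1 when the marks are missing.
        static long blockSize(BitSet marks, int start) {
            if (!(marks.get(start) && marks.get(start + 1))) {
                return -1; // no Printezis marks for this block
            }
            int end = marks.nextSetBit(start + 2); // word that marks the end
            return (end < 0) ? -1 : (end + 1) - start;
        }
    }
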
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CMSHeap.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import sun.jvm.hotspot.debugger.Address;
-import sun.jvm.hotspot.gc.shared.GenCollectedHeap;
-import sun.jvm.hotspot.gc.shared.CollectedHeapName;
-
-public class CMSHeap extends GenCollectedHeap {
-
-  public CMSHeap(Address addr) {
-    super(addr);
-  }
-
-  public CollectedHeapName kind() {
-    return CollectedHeapName.CMS;
-  }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/CompactibleFreeListSpace.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,209 +0,0 @@
-/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.shared.*;
-import sun.jvm.hotspot.memory.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
-
-public class CompactibleFreeListSpace extends CompactibleSpace implements LiveRegionsProvider {
-   private static AddressField collectorField;
-   private static AddressField indexedFreeListField;
-   private static AddressField dictionaryField;
-   private static long         smallLinearAllocBlockFieldOffset;
-
-   private int heapWordSize;     // 4 for 32bit, 8 for 64 bits
-   private int IndexSetStart;    // for small indexed list
-   private int IndexSetSize;
-   private int IndexSetStride;
-   private static long MinChunkSizeInBytes;
-
-   static {
-      VM.registerVMInitializedObserver(new Observer() {
-         public void update(Observable o, Object data) {
-            initialize(VM.getVM().getTypeDataBase());
-         }
-      });
-   }
-
-   private static synchronized void initialize(TypeDataBase db) {
-      long sizeofFreeChunk = db.lookupType("FreeChunk").getSize();
-      VM vm = VM.getVM();
-
-     Type type = db.lookupType("CompactibleFreeListSpace");
-     collectorField = type.getAddressField("_collector");
-     collectorField       = type.getAddressField("_collector");
-     dictionaryField      = type.getAddressField("_dictionary");
-     indexedFreeListField = type.getAddressField("_indexedFreeList[0]");
-     smallLinearAllocBlockFieldOffset = type.getField("_smallLinearAllocBlock").getOffset();
-     MinChunkSizeInBytes = (type.getCIntegerField("_min_chunk_size_in_bytes")).getValue();
-   }
-
-   public CompactibleFreeListSpace(Address addr) {
-      super(addr);
-      VM vm = VM.getVM();
-      heapWordSize   = vm.getHeapWordSize();
-      IndexSetStart  = vm.getMinObjAlignmentInBytes() / heapWordSize;
-      IndexSetStride = IndexSetStart;
-      IndexSetSize   = vm.getIndexSetSize();
-   }
-
-   // Accessing block offset table
-   public CMSCollector collector() {
-    return (CMSCollector) VMObjectFactory.newObject(
-                                 CMSCollector.class,
-                                 collectorField.getValue(addr));
-   }
-
-   public long free0() {
-     return capacity() - used0();
-   }
-
-   public long used() {
-     return capacity() - free();
-   }
-
-   public long used0() {
-      List<MemRegion> regions = getLiveRegions();
-      long usedSize = 0L;
-      for (Iterator<MemRegion> itr = regions.iterator(); itr.hasNext();) {
-         MemRegion mr = itr.next();
-         usedSize += mr.byteSize();
-      }
-      return usedSize;
-   }
-
-   public long free() {
-      // small chunks
-      long size = 0;
-      Address cur = addr.addOffsetTo( indexedFreeListField.getOffset() );
-      cur = cur.addOffsetTo(IndexSetStart*AdaptiveFreeList.sizeOf());
-      for (int i=IndexSetStart; i<IndexSetSize; i += IndexSetStride) {
-         AdaptiveFreeList freeList = (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
-         size += i*freeList.count();
-         cur= cur.addOffsetTo(IndexSetStride*AdaptiveFreeList.sizeOf());
-      }
-
-      // large block
-      AFLBinaryTreeDictionary aflbd = (AFLBinaryTreeDictionary) VMObjectFactory.newObject(AFLBinaryTreeDictionary.class,
-                                                                                   dictionaryField.getValue(addr));
-      size += aflbd.size();
-
-
-      // linear block in TLAB
-      LinearAllocBlock lab = (LinearAllocBlock) VMObjectFactory.newObject(LinearAllocBlock.class,
-                                                                          addr.addOffsetTo(smallLinearAllocBlockFieldOffset));
-      size += lab.word_size();
-
-      return size*heapWordSize;
-  }
-
-   public void printOn(PrintStream tty) {
-      tty.print("free-list-space");
-      tty.print("[ " + bottom() + " , " + end() + " ) ");
-      long cap = capacity();
-      long used_size = used();
-      long free_size = free();
-      int  used_perc = (int)((double)used_size/cap*100);
-      tty.print("space capacity = " + cap + " used(" + used_perc + "%)= " + used_size + " ");
-      tty.print("free= " + free_size );
-      tty.print("\n");
-
-   }
-
-   public Address skipBlockSizeUsingPrintezisBits(Address pos) {
-       CMSCollector collector = collector();
-       long size = 0;
-       Address addr = null;
-
-       if (collector != null) {
-         size = collector.blockSizeUsingPrintezisBits(pos);
-         if (size >= 3) {
-           addr = pos.addOffsetTo(adjustObjectSizeInBytes(size));
-         }
-       }
-       return addr;
-   }
-
-  @Override
-   public List<MemRegion> getLiveRegions() {
-      List<MemRegion> res = new ArrayList<>();
-      VM vm = VM.getVM();
-      Debugger dbg = vm.getDebugger();
-      ObjectHeap heap = vm.getObjectHeap();
-      Address cur = bottom();
-      Address regionStart = cur;
-      Address limit = end();
-      final long addressSize = vm.getAddressSize();
-
-      for (; cur.lessThan(limit);) {
-         Address k = cur.getAddressAt(addressSize);
-         if (FreeChunk.indicatesFreeChunk(cur)) {
-            if (! cur.equals(regionStart)) {
-               res.add(new MemRegion(regionStart, cur));
-            }
-            FreeChunk fc = (FreeChunk) VMObjectFactory.newObject(FreeChunk.class, cur);
-            long chunkSize = fc.size();
-            if (Assert.ASSERTS_ENABLED) {
-               Assert.that(chunkSize > 0, "invalid FreeChunk size");
-            }
-            // note that fc.size() gives chunk size in heap words
-            cur = cur.addOffsetTo(chunkSize * addressSize);
-            regionStart = cur;
-         } else if (k != null) {
-            Oop obj = heap.newOop(cur.addOffsetToAsOopHandle(0));
-            long objectSize = obj.getObjectSize();
-            cur = cur.addOffsetTo(adjustObjectSizeInBytes(objectSize));
-         } else {
-            // FIXME: need to do a better job here.
-            // can I use bitMap here?
-            //Find the object size using Printezis bits and skip over
-            long size = collector().blockSizeUsingPrintezisBits(cur);
-            if (size == -1) {
-              break;
-            }
-            cur = cur.addOffsetTo(adjustObjectSizeInBytes(size));
-         }
-      }
-      return res;
-   }
-
-   //-- Internals only below this point
-
-   // Unlike corresponding VM code, we operate on byte size rather than
-   // HeapWord size for convenience.
-
-   public static long adjustObjectSizeInBytes(long sizeInBytes) {
-      return Oop.alignObjectSize(Math.max(sizeInBytes, MinChunkSizeInBytes));
-   }
-
-}
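
The deleted free() above sums three sources of free space: the per-size indexed free lists (chunk size times chunk count), the dictionary of large chunks, and the linear allocation block. A reduced sketch of the indexed-list term, assuming a plain count array in place of the SA free-list objects:

    class FreeListSketch {
        // counts[i] = free chunks of exactly i heap words (assumed input).
        static long indexedFreeWords(long[] counts, int start, int stride) {
            long words = 0;
            for (int i = start; i < counts.length; i += stride) {
                words += (long) i * counts[i]; // a size-i chunk holds i words
            }
            return words;
        }
    }
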
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ConcurrentMarkSweepGeneration.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-/*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.shared.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-public class ConcurrentMarkSweepGeneration extends CardGeneration {
-  private static AddressField cmsSpaceField;
-
-  public ConcurrentMarkSweepGeneration(Address addr) {
-    super(addr);
-  }
-
-  static {
-    VM.registerVMInitializedObserver(new Observer() {
-        public void update(Observable o, Object data) {
-          initialize(VM.getVM().getTypeDataBase());
-        }
-      });
-  }
-
-  private static synchronized void initialize(TypeDataBase db) {
-    Type type = db.lookupType("ConcurrentMarkSweepGeneration");
-    cmsSpaceField = type.getAddressField("_cmsSpace");
-  }
-
-  // Accessing space
-  public CompactibleFreeListSpace cmsSpace() {
-    return (CompactibleFreeListSpace) VMObjectFactory.newObject(
-                                 CompactibleFreeListSpace.class,
-                                 cmsSpaceField.getValue(addr));
-  }
-
-  public long capacity()                { return cmsSpace().capacity(); }
-  public long used()                    { return cmsSpace().used(); }
-  public long free()                    { return cmsSpace().free(); }
-  public long contiguousAvailable()     { throw new RuntimeException("not yet implemented"); }
-  public boolean contains(Address p)    { return cmsSpace().contains(p); }
-  public void spaceIterate(SpaceClosure blk, boolean usedOnly) {
-     blk.doSpace(cmsSpace());
-  }
-  public void liveRegionsIterate(LiveRegionsClosure closure) {
-      closure.doLiveRegions(cmsSpace());
-  }
-
-  public Generation.Name kind() {
-    return Generation.Name.CONCURRENT_MARK_SWEEP;
-  }
-
-  public String name() {
-    return "concurrent mark-sweep generation";
-  }
-
-  public void printOn(PrintStream tty) {
-    tty.println(name());
-    cmsSpace().printOn(tty);
-  }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/LinearAllocBlock.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-/*
- * @(#)BinaryTreeDictionary.java
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.runtime.*;
-
-public class LinearAllocBlock extends VMObject {
-   static {
-      VM.registerVMInitializedObserver(new Observer() {
-         public void update(Observable o, Object data) {
-            initialize(VM.getVM().getTypeDataBase());
-         }
-      });
-   }
-
-   private static synchronized void initialize(TypeDataBase db) {
-      Type type = db.lookupType("LinearAllocBlock");
-      word_sizeField= type.getCIntegerField("_word_size");
-   }
-
-   // Fields
-   private static CIntegerField word_sizeField;
-
-   // Accessors
-   public long word_size() {
-      return word_sizeField.getValue(addr);
-   }
-
-   // Constructor
-   public LinearAllocBlock(Address addr) {
-      super(addr);
-   }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/cms/ParNewGeneration.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.gc.cms;
-
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.serial.*;
-import sun.jvm.hotspot.gc.shared.*;
-
-public class ParNewGeneration extends DefNewGeneration {
-  public ParNewGeneration(Address addr) {
-    super(addr);
-  }
-
-  public Generation.Name kind() {
-    return Generation.Name.PAR_NEW;
-  }
-}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GCName.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,7 @@
   PSMarkSweep ("PSMarkSweep"),
   ParallelScavenge ("ParallelScavenge"),
   DefNew ("DefNew"),
-  ParNew ("ParNew"),
   G1New ("G1New"),
-  ConcurrentMarkSweep ("ConcurrentMarkSweep"),
   G1Old ("G1Old"),
   G1Full ("G1Full"),
   Z ("Z"),
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/Generation.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/Generation.java	Thu Nov 14 13:50:03 2019 +0000
@@ -81,9 +81,7 @@
 
     // constants from Generation::Name
     NAME_DEF_NEW = db.lookupIntConstant("Generation::DefNew").intValue();
-    NAME_PAR_NEW = db.lookupIntConstant("Generation::ParNew").intValue();
     NAME_MARK_SWEEP_COMPACT = db.lookupIntConstant("Generation::MarkSweepCompact").intValue();
-    NAME_CONCURRENT_MARK_SWEEP = db.lookupIntConstant("Generation::ConcurrentMarkSweep").intValue();
     NAME_OTHER = db.lookupIntConstant("Generation::Other").intValue();
   }
 
@@ -93,9 +91,7 @@
 
   public static class Name {
     public static final Name DEF_NEW = new Name("DefNew");
-    public static final Name PAR_NEW = new Name("ParNew");
     public static final Name MARK_SWEEP_COMPACT = new Name("MarkSweepCompact");
-    public static final Name CONCURRENT_MARK_SWEEP = new Name("ConcurrentMarkSweep");
     public static final Name OTHER = new Name("Other");
 
     private Name(String value) {
@@ -115,12 +111,8 @@
   static Generation.Name nameForEnum(int value) {
      if (value == NAME_DEF_NEW) {
         return Name.DEF_NEW;
-     } else if (value == NAME_PAR_NEW) {
-        return Name.PAR_NEW;
      } else if (value == NAME_MARK_SWEEP_COMPACT) {
         return Name.MARK_SWEEP_COMPACT;
-     } else if (value == NAME_CONCURRENT_MARK_SWEEP) {
-        return Name.CONCURRENT_MARK_SWEEP;
      } else if (value == NAME_OTHER) {
         return Name.OTHER;
      } else {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenerationFactory.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/GenerationFactory.java	Thu Nov 14 13:50:03 2019 +0000
@@ -27,7 +27,6 @@
 import java.util.*;
 
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.cms.*;
 import sun.jvm.hotspot.gc.serial.*;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
@@ -50,9 +49,7 @@
     ctor = new VirtualConstructor(db);
 
     ctor.addMapping("DefNewGeneration", DefNewGeneration.class);
-    ctor.addMapping("ParNewGeneration", ParNewGeneration.class);
     ctor.addMapping("TenuredGeneration", TenuredGeneration.class);
-    ctor.addMapping("ConcurrentMarkSweepGeneration", ConcurrentMarkSweepGeneration.class);
   }
 
   public static Generation newObject(Address addr) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/FileMapInfo.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@
 
     // SpaceInfo
     type = db.lookupType("CDSFileMapRegion");
-    long mdRegionBaseAddressOffset = type.getField("_addr._base").getOffset();
+    long mdRegionBaseAddressOffset = type.getField("_mapped_base").getOffset();
     mdRegionBaseAddress = (mdSpaceValue.addOffsetTo(mdRegionBaseAddressOffset)).getAddressAt(0);
     long mdRegionSizeOffset = type.getField("_used").getOffset();
     long mdRegionSize = (mdSpaceValue.addOffsetTo(mdRegionSizeOffset)).getAddressAt(0).asLongValue();
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Universe.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
 
 import sun.jvm.hotspot.debugger.Address;
 import sun.jvm.hotspot.debugger.OopHandle;
-import sun.jvm.hotspot.gc.cms.CMSHeap;
 import sun.jvm.hotspot.gc.epsilon.EpsilonHeap;
 import sun.jvm.hotspot.gc.g1.G1CollectedHeap;
 import sun.jvm.hotspot.gc.parallel.ParallelScavengeHeap;
@@ -84,7 +83,6 @@
     collectedHeapField = type.getAddressField("_collectedHeap");
 
     heapConstructor = new VirtualConstructor(db);
-    addHeapTypeIfInDB(db, CMSHeap.class);
     addHeapTypeIfInDB(db, SerialHeap.class);
     addHeapTypeIfInDB(db, ParallelScavengeHeap.class);
     addHeapTypeIfInDB(db, G1CollectedHeap.class);
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/Mark.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,11 +73,6 @@
     noHashInPlace       = db.lookupLongConstant("markWord::no_hash_in_place").longValue();
     noLockInPlace       = db.lookupLongConstant("markWord::no_lock_in_place").longValue();
     maxAge              = db.lookupLongConstant("markWord::max_age").longValue();
-
-    /* Constants in markWord used by CMS. */
-    cmsShift            = db.lookupLongConstant("markWord::cms_shift").longValue();
-    cmsMask             = db.lookupLongConstant("markWord::cms_mask").longValue();
-    sizeShift           = db.lookupLongConstant("markWord::size_shift").longValue();
   }
 
   // Field accessors
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/ObjectHeap.java	Thu Nov 14 13:50:03 2019 +0000
@@ -32,7 +32,6 @@
 import java.util.*;
 
 import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.gc.cms.*;
 import sun.jvm.hotspot.gc.shared.*;
 import sun.jvm.hotspot.gc.epsilon.*;
 import sun.jvm.hotspot.gc.g1.*;
@@ -234,16 +233,11 @@
     }
     visitor.prologue(totalSize);
 
-    CompactibleFreeListSpace cmsSpaceOld = null;
     CollectedHeap heap = VM.getVM().getUniverse().heap();
 
     if (heap instanceof GenCollectedHeap) {
       GenCollectedHeap genHeap = (GenCollectedHeap) heap;
       Generation genOld = genHeap.getGen(1);
-      if (genOld instanceof ConcurrentMarkSweepGeneration) {
-          ConcurrentMarkSweepGeneration concGen = (ConcurrentMarkSweepGeneration)genOld;
-          cmsSpaceOld = concGen.cmsSpace();
-      }
     }
 
     for (int i = 0; i < liveRegions.size(); i += 2) {
@@ -265,20 +259,7 @@
             }
           }
           if (obj == null) {
-             //Find the object size using Printezis bits and skip over
-             long size = 0;
-
-             if ( (cmsSpaceOld != null) && cmsSpaceOld.contains(handle) ){
-                 size = cmsSpaceOld.collector().blockSizeUsingPrintezisBits(handle);
-             }
-
-             if (size <= 0) {
-                //Either Printezis bits not set or handle is not in cms space.
-                throw new UnknownOopException();
-             }
-
-             handle = handle.addOffsetToAsOopHandle(CompactibleFreeListSpace.adjustObjectSizeInBytes(size));
-             continue;
+              throw new UnknownOopException();
           }
           if (of == null || of.canInclude(obj)) {
                   if (visitor.doObj(obj)) {
@@ -286,11 +267,8 @@
                           break;
                   }
           }
-          if ( (cmsSpaceOld != null) && cmsSpaceOld.contains(handle)) {
-              handle = handle.addOffsetToAsOopHandle(CompactibleFreeListSpace.adjustObjectSizeInBytes(obj.getObjectSize()) );
-          } else {
-              handle = handle.addOffsetToAsOopHandle(obj.getObjectSize());
-          }
+
+          handle = handle.addOffsetToAsOopHandle(obj.getObjectSize());
         }
       }
       catch (AddressException e) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Thread.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/Thread.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -118,7 +118,6 @@
   public boolean   isHiddenFromExternalView()    { return false; }
   public boolean   isJvmtiAgentThread()          { return false; }
   public boolean   isWatcherThread()             { return false; }
-  public boolean   isConcurrentMarkSweepThread() { return false; }
   public boolean   isServiceThread()             { return false; }
 
   /** Memory operations */
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Nov 14 13:50:03 2019 +0000
@@ -104,7 +104,6 @@
   private int          heapOopSize;
   private int          klassPtrSize;
   private int          oopSize;
-  private final int    IndexSetSize;
   /** -XX flags (value origin) */
   public static int    Flags_DEFAULT;
   public static int    Flags_COMMAND_LINE;
@@ -491,7 +490,6 @@
     Flags_VALUE_ORIGIN_MASK = db.lookupIntConstant("JVMFlag::VALUE_ORIGIN_MASK").intValue();
     Flags_ORIG_COMMAND_LINE = db.lookupIntConstant("JVMFlag::ORIG_COMMAND_LINE").intValue();
     oopSize  = db.lookupIntConstant("oopSize").intValue();
-    IndexSetSize = db.lookupIntConstant("CompactibleFreeListSpace::IndexSetSize").intValue();
 
     intType = db.lookupType("int");
     uintType = db.lookupType("uint");
@@ -711,10 +709,6 @@
     return heapOopSize;
   }
 
-  public int getIndexSetSize() {
-    return IndexSetSize;
-  }
-
   public int getKlassPtrSize() {
     return klassPtrSize;
   }
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64CurrentFrameGuess.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/amd64/AMD64CurrentFrameGuess.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,7 +137,16 @@
                 setValues(curSP, null, pc);
                 return true;
               }
+              Frame oldFrame = frame;
               frame = frame.sender(map);
+              if (frame.getSP().lessThanOrEqual(oldFrame.getSP())) {
+                  // Frame points to itself or to a location in the wrong direction.
+                  // Break the loop and move on to next offset.
+                  if (DEBUG) {
+                      System.out.println("AMD64CurrentFrameGuess.run: frame <= oldFrame: " + frame);
+                  }
+                  break;
+              }
             }
           } catch (Exception e) {
             if (DEBUG) {
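
This fix, and the identical one in the x86 file below, adds the same sanity check: a sender frame's SP must lie strictly above the current frame's SP, otherwise the walk is cycling on a bogus frame and should give up on this candidate offset. A standalone sketch of the guard; Frame here is a hypothetical stand-in, not the SA class:

    class FrameWalkSketch {
        interface Frame {
            long sp();       // stack pointer of this frame
            Frame sender();  // platform-specific unwind to the caller
        }

        // Walk senders, stopping when SP stops growing toward older frames.
        static Frame walk(Frame frame, int maxFrames) {
            for (int i = 0; i < maxFrames; i++) {
                Frame sender = frame.sender();
                if (sender.sp() <= frame.sp()) {
                    // Sender points at itself or in the wrong direction:
                    // abandon this guess instead of spinning forever.
                    break;
                }
                frame = sender;
            }
            return frame;
        }
    }
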
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/x86/X86CurrentFrameGuess.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/x86/X86CurrentFrameGuess.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2006, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,7 +137,16 @@
                 setValues(curSP, null, pc);
                 return true;
               }
+              Frame oldFrame = frame;
               frame = frame.sender(map);
+              if (frame.getSP().lessThanOrEqual(oldFrame.getSP())) {
+                  // Frame points to itself or to a location in the wrong direction.
+                  // Break the loop and move on to next offset.
+                  if (DEBUG) {
+                      System.out.println("X86CurrentFrameGuess.run: frame <= oldFrame: " + frame);
+                  }
+                  break;
+              }
             }
           } catch (Exception e) {
             if (DEBUG) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Nov 14 13:50:03 2019 +0000
@@ -160,12 +160,6 @@
           System.out.println("using thread-local object allocation.");
        }
 
-       l = getFlagValue("UseConcMarkSweepGC", flagMap);
-       if (l == 1L) {
-          System.out.println("Concurrent Mark-Sweep GC");
-          return;
-       }
-
        l = getFlagValue("UseParallelGC", flagMap);
        if (l == 1L) {
           System.out.print("Parallel GC ");
--- a/src/jdk.hotspot.agent/share/native/libsaproc/ps_core_common.c	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/share/native/libsaproc/ps_core_common.c	Thu Nov 14 13:50:03 2019 +0000
@@ -261,6 +261,7 @@
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
 #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
+#define SHARED_BASE_ADDRESS_SYM "SharedBaseAddress"
 #define LIBJVM_NAME "/libjvm.so"
 #endif
 
@@ -268,6 +269,7 @@
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
 #define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
+#define SHARED_BASE_ADDRESS_SYM "_SharedBaseAddress"
 #define LIBJVM_NAME "/libjvm.dylib"
 #endif
 
@@ -281,7 +283,8 @@
       char classes_jsa[PATH_MAX];
       CDSFileMapHeaderBase header;
       int fd = -1;
-      uintptr_t base = 0, useSharedSpacesAddr = 0;
+      uintptr_t useSharedSpacesAddr = 0;
+      uintptr_t sharedBaseAddressAddr = 0, sharedBaseAddress = 0;
       uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
       jboolean useSharedSpaces = 0;
       int m;
@@ -308,6 +311,17 @@
         return true;
       }
 
+      sharedBaseAddressAddr = lookup_symbol(ph, jvm_name, SHARED_BASE_ADDRESS_SYM);
+      if (sharedBaseAddressAddr == 0) {
+        print_debug("can't lookup 'SharedBaseAddress' flag\n");
+        return false;
+      }
+
+      if (read_pointer(ph, sharedBaseAddressAddr, &sharedBaseAddress) != true) {
+        print_debug("can't read the value of 'SharedBaseAddress' flag\n");
+        return false;
+      }
+
       sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
       if (sharedArchivePathAddrAddr == 0) {
         print_debug("can't lookup shared archive path symbol\n");
@@ -363,16 +377,19 @@
       ph->core->classes_jsa_fd = fd;
       // add read-only maps from classes.jsa to the list of maps
       for (m = 0; m < NUM_CDS_REGIONS; m++) {
-        if (header._space[m]._read_only) {
+        if (header._space[m]._read_only &&
+            !header._space[m]._is_heap_region &&
+            !header._space[m]._is_bitmap_region) {
           // With *some* linux versions, the core file doesn't include read-only mmap'ed
           // files regions, so let's add them here. This is harmless if the core file also
           // include these regions.
-          base = (uintptr_t) header._space[m]._addr._base;
+          uintptr_t base = sharedBaseAddress + (uintptr_t) header._space[m]._mapping_offset;
+          size_t size = header._space[m]._used;
           // no need to worry about the fractional pages at-the-end.
           // possible fractional pages are handled by core_read_data.
           add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
-                                   base, (size_t) header._space[m]._used);
-          print_debug("added a share archive map at 0x%lx\n", base);
+                                   base, size);
+          print_debug("added a share archive map [%d] at 0x%lx (size 0x%lx bytes)\n", m, base, size);
         }
       }
       return true;
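
The added symbol lookup above reflects a CDS layout change: region headers no longer carry an absolute _addr._base, they carry _mapping_offset, which must be rebased on the SharedBaseAddress flag read out of the target process. A minimal Java sketch of the rebasing; the stand-in fields mirror the header, the values are assumptions:

    class CdsRebaseSketch {
        // Stand-in for the CDSFileMapRegion fields used above.
        static class Region {
            long mappingOffset; // replaces the old absolute _addr._base
            long used;          // bytes used in the region
        }

        // Runtime address of a region: shared base + per-region offset.
        static long regionBase(long sharedBaseAddress, Region r) {
            return sharedBaseAddress + r.mappingOffset;
        }
    }
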
--- a/src/jdk.hotspot.agent/solaris/native/libsaproc/saproc.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.hotspot.agent/solaris/native/libsaproc/saproc.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -538,9 +538,11 @@
 }
 
 #define USE_SHARED_SPACES_SYM   "UseSharedSpaces"
+#define SHARED_BASE_ADDRESS_SYM "SharedBaseAddress"
 // mangled symbol name for Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "__1cJArgumentsRSharedArchivePath_"
 
+static uintptr_t sharedBaseAddress = 0;
 static int
 init_classsharing_workaround(void *cd, const prmap_t* pmap, const char* obj_name) {
   Debugger* dbg = (Debugger*) cd;
@@ -577,6 +579,19 @@
     return 1;
   }
 
+  psaddr_t sharedBaseAddressAddr = 0;
+  ps_pglobal_lookup(ph, jvm_name, SHARED_BASE_ADDRESS_SYM, &sharedBaseAddressAddr);
+  if (sharedBaseAddressAddr == 0) {
+    print_debug("can't find symbol 'SharedBaseAddress'\n");
+    THROW_NEW_DEBUGGER_EXCEPTION_("can't find 'SharedBaseAddress' flag\n", 1);
+  }
+
+  sharedBaseAddress = 0;
+  if (read_pointer(ph, sharedBaseAddressAddr, &sharedBaseAddress) != true) {
+    print_debug("can't read the value of 'SharedBaseAddress' flag\n");
+    THROW_NEW_DEBUGGER_EXCEPTION_("can't get SharedBaseAddress from debuggee", 1);
+  }
+
   char classes_jsa[PATH_MAX];
   psaddr_t sharedArchivePathAddrAddr = 0;
   ps_pglobal_lookup(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM, &sharedArchivePathAddrAddr);
@@ -648,9 +663,14 @@
 
   if (_libsaproc_debug) {
     for (int m = 0; m < NUM_CDS_REGIONS; m++) {
-       print_debug("shared file offset %d mapped at 0x%lx, size = %ld, read only? = %d\n",
-          pheader->_space[m]._file_offset, pheader->_space[m]._addr._base,
-          pheader->_space[m]._used, pheader->_space[m]._read_only);
+      if (!pheader->_space[m]._is_heap_region &&
+          !pheader->_space[m]._is_bitmap_region) {
+        jlong mapping_offset = pheader->_space[m]._mapping_offset;
+        jlong baseAddress = mapping_offset + (jlong)sharedBaseAddress;
+        print_debug("shared file offset %d mapped at 0x%lx, size = %ld, read only? = %d\n",
+                    pheader->_space[m]._file_offset, baseAddress,
+                    pheader->_space[m]._used, pheader->_space[m]._read_only);
+      }
     }
   }
 
@@ -1052,11 +1072,14 @@
         // We can skip the non-read-only maps. These are mapped as MAP_PRIVATE
         // and hence will be read by libproc. Besides, the file copy may be
         // stale because the process might have modified those pages.
-        if (pheader->_space[m]._read_only) {
-          jlong baseAddress = (jlong) (uintptr_t) pheader->_space[m]._addr._base;
-          size_t usedSize = pheader->_space[m]._used;
-          if (address >= baseAddress && address < (baseAddress + usedSize)) {
-            // the given address falls in this shared heap area
+        if (pheader->_space[m]._read_only &&
+            !pheader->_space[m]._is_heap_region &&
+            !pheader->_space[m]._is_bitmap_region) {
+          jlong mapping_offset = (jlong) (uintptr_t) pheader->_space[m]._mapping_offset;
+          jlong baseAddress = mapping_offset + (jlong)sharedBaseAddress;
+          size_t usedSize = pheader->_space[m]._used;
+          if (address >= baseAddress && address < (baseAddress + usedSize)) {
+            // the given address falls in this shared metadata area
             print_debug("found shared map at 0x%lx\n", (long) baseAddress);
 
 
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/TranslatedException.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/TranslatedException.java	Thu Nov 14 13:50:03 2019 +0000
@@ -122,7 +122,7 @@
      * a single exception is:
      *
      * <pre>
-     * <exception class name> '|' <exception message> '|' <stack size> '|' [<class> '|' <method> '|' <file> '|' <line> '|' ]*
+     * <exception class name> '|' <exception message> '|' <stack size> '|' [ <classLoader> '|' <module> '|' <moduleVersion> '|' <class> '|' <method> '|' <file> '|' <line> '|' ]*
      * </pre>
      *
      * Each exception is encoded before the exception it causes.
@@ -149,8 +149,10 @@
                 for (int i = 0; i < stackTrace.length; i++) {
                     StackTraceElement frame = stackTrace[i];
                     if (frame != null) {
-                        enc.format("%s|%s|%s|%d|", frame.getClassName(), frame.getMethodName(),
-                                        encodedString(frame.getFileName()), frame.getLineNumber());
+                        enc.format("%s|%s|%s|%s|%s|%s|%d|", encodedString(frame.getClassLoaderName()),
+                                encodedString(frame.getModuleName()), encodedString(frame.getModuleVersion()),
+                                frame.getClassName(), frame.getMethodName(),
+                                encodedString(frame.getFileName()), frame.getLineNumber());
                     }
                 }
             }
@@ -206,14 +208,26 @@
                 StackTraceElement[] suffix = getStackTraceSuffix();
                 StackTraceElement[] stackTrace = new StackTraceElement[stackTraceDepth + suffix.length];
                 for (int j = 0; j < stackTraceDepth; j++) {
+                    String classLoaderName = parts[i++];
+                    String moduleName = parts[i++];
+                    String moduleVersion = parts[i++];
                     String className = parts[i++];
                     String methodName = parts[i++];
                     String fileName = parts[i++];
                     int lineNumber = Integer.parseInt(parts[i++]);
+                    if (classLoaderName.isEmpty()) {
+                        classLoaderName = null;
+                    }
+                    if (moduleName.isEmpty()) {
+                        moduleName = null;
+                    }
+                    if (moduleVersion.isEmpty()) {
+                        moduleVersion = null;
+                    }
                     if (fileName.isEmpty()) {
                         fileName = null;
                     }
-                    stackTrace[j] = new StackTraceElement(className, methodName, fileName, lineNumber);
+                    stackTrace[j] = new StackTraceElement(classLoaderName, moduleName, moduleVersion, className, methodName, fileName, lineNumber);
                 }
                 System.arraycopy(suffix, 0, stackTrace, stackTraceDepth, suffix.length);
                 throwable.setStackTrace(stackTrace);
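
The widened record format above adds class loader, module, and module version in front of the old four fields, with empty strings decoding back to null. A standalone round-trip sketch of one frame under that format (a simplification, not the JVMCI code itself, which also escapes '|' inside field values):

    class FrameCodecSketch {
        static String encode(StackTraceElement f) {
            return String.format("%s|%s|%s|%s|%s|%s|%d|",
                    orEmpty(f.getClassLoaderName()), orEmpty(f.getModuleName()),
                    orEmpty(f.getModuleVersion()), f.getClassName(),
                    f.getMethodName(), orEmpty(f.getFileName()),
                    f.getLineNumber());
        }

        // p holds the seven fields of one frame, in encoding order.
        static StackTraceElement decode(String[] p) {
            return new StackTraceElement(orNull(p[0]), orNull(p[1]), orNull(p[2]),
                    p[3], p[4], orNull(p[5]), Integer.parseInt(p[6]));
        }

        static String orEmpty(String s) { return s == null ? "" : s; }
        static String orNull(String s)  { return s.isEmpty() ? null : s; }
    }
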
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java	Thu Nov 14 13:50:03 2019 +0000
@@ -164,7 +164,6 @@
     }
 
     public final boolean useG1GC = getFlag("UseG1GC", Boolean.class);
-    public final boolean useCMSGC = getFlag("UseConcMarkSweepGC", Boolean.class);
 
     public final int allocatePrefetchStyle = getFlag("AllocatePrefetchStyle", Integer.class);
     public final int allocatePrefetchInstr = getFlag("AllocatePrefetchInstr", Integer.class);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalRuntime.java	Thu Nov 14 13:50:03 2019 +0000
@@ -231,8 +231,7 @@
     public enum HotSpotGC {
         // Supported GCs
         Serial(true, "UseSerialGC"),
-        Parallel(true, "UseParallelGC", "UseParallelOldGC", "UseParNewGC"),
-        CMS(true, "UseConcMarkSweepGC"),
+        Parallel(true, "UseParallelGC", "UseParallelOldGC"),
         G1(true, "UseG1GC"),
 
         // Unsupported GCs
--- a/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecording.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.jfr/share/classes/jdk/jfr/internal/PlatformRecording.java	Thu Nov 14 13:50:03 2019 +0000
@@ -484,7 +484,10 @@
         }
         for (FlightRecorderListener cl : PlatformRecorder.getListeners()) {
             try {
-                cl.recordingStateChanged(getRecording());
+                // Skip internal recordings
+                if (recording != null) {
+                    cl.recordingStateChanged(recording);
+                }
             } catch (RuntimeException re) {
                 Logger.log(JFR, WARN, "Error notifying recorder listener:" + re.getMessage());
             }
--- a/src/jdk.security.jgss/share/classes/com/sun/security/sasl/gsskerb/GssKrb5Client.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.security.jgss/share/classes/com/sun/security/sasl/gsskerb/GssKrb5Client.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@
 import java.util.logging.Level;
 import javax.security.sasl.*;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 // JAAS
 import javax.security.auth.callback.CallbackHandler;
 
@@ -150,11 +152,7 @@
         }
 
         if (authzID != null && authzID.length() > 0) {
-            try {
-                this.authzID = authzID.getBytes("UTF8");
-            } catch (IOException e) {
-                throw new SaslException("Cannot encode authorization ID", e);
-            }
+            this.authzID = authzID.getBytes(UTF_8);
         }
     }
 
--- a/src/jdk.security.jgss/share/classes/com/sun/security/sasl/gsskerb/GssKrb5Server.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/src/jdk.security.jgss/share/classes/com/sun/security/sasl/gsskerb/GssKrb5Server.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,8 @@
 import java.util.Map;
 import java.util.logging.Level;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+
 // JAAS
 import javax.security.auth.callback.*;
 
@@ -300,12 +302,8 @@
 
             // Get authorization identity, if any
             if (gssOutToken.length > 4) {
-                try {
-                    authzid = new String(gssOutToken, 4,
-                        gssOutToken.length - 4, "UTF-8");
-                } catch (UnsupportedEncodingException uee) {
-                    throw new SaslException ("Cannot decode authzid", uee);
-                }
+                authzid = new String(gssOutToken, 4,
+                        gssOutToken.length - 4, UTF_8);
             } else {
                 authzid = peer;
             }
--- a/test/hotspot/gtest/gc/z/test_zForwarding.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/gtest/gc/z/test_zForwarding.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -159,7 +159,7 @@
 
     const uint32_t live_objects = size;
     const size_t live_bytes = live_objects * object_size;
-    page.inc_live_atomic(live_objects, live_bytes);
+    page.inc_live(live_objects, live_bytes);
 
     // Setup forwarding
     ZForwarding* const forwarding = ZForwarding::create(&page);
--- a/test/hotspot/gtest/gc/z/test_zLiveMap.cpp	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/gtest/gc/z/test_zLiveMap.cpp	Thu Nov 14 13:50:03 2019 +0000
@@ -35,7 +35,7 @@
     uintptr_t object = 0u;
 
     // Mark the object strong.
-    livemap.set_atomic(object, false /* finalizable */, inc_live);
+    livemap.set(object, false /* finalizable */, inc_live);
 
     // Check that both bits are in the same segment.
     ASSERT_EQ(livemap.index_to_segment(0), livemap.index_to_segment(1));
--- a/test/hotspot/jtreg/ProblemList.txt	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/ProblemList.txt	Thu Nov 14 13:50:03 2019 +0000
@@ -108,7 +108,7 @@
 serviceability/sa/ClhsdbJdis.java 8193639 solaris-all
 serviceability/sa/ClhsdbJhisto.java 8193639,8211767 solaris-all,linux-ppc64le,linux-ppc64
 serviceability/sa/ClhsdbJstack.java 8193639 solaris-all
-serviceability/sa/ClhsdbJstackXcompStress.java 8193639,8231635 solaris-all,windows-x64
+serviceability/sa/ClhsdbJstackXcompStress.java 8193639 solaris-all
 serviceability/sa/ClhsdbLongConstant.java 8193639 solaris-all
 serviceability/sa/ClhsdbPmap.java 8193639,8211767 solaris-all,linux-ppc64le,linux-ppc64
 serviceability/sa/ClhsdbPrintAll.java 8193639 solaris-all
--- a/test/hotspot/jtreg/TEST.ROOT	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/TEST.ROOT	Thu Nov 14 13:50:03 2019 +0000
@@ -47,7 +47,6 @@
     vm.gc.G1 \
     vm.gc.Serial \
     vm.gc.Parallel \
-    vm.gc.ConcMarkSweep \
     vm.gc.Shenandoah \
     vm.gc.Epsilon \
     vm.gc.Z \
--- a/test/hotspot/jtreg/TEST.groups	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/TEST.groups	Thu Nov 14 13:50:03 2019 +0000
@@ -37,7 +37,7 @@
 
 hotspot_compiler_all_gcs = \
   :hotspot_compiler \
-  -:tier1_compiler_not_cms
+  -:tier1_compiler_aot_jvmci
 
 hotspot_gc = \
   gc \
@@ -152,7 +152,7 @@
   compiler/aot \
   compiler/profiling
 
-tier1_compiler_not_cms = \
+tier1_compiler_aot_jvmci = \
   compiler/aot \
   compiler/jvmci
 
@@ -198,7 +198,6 @@
   -gc/logging/TestUnifiedLoggingSwitchStress.java \
   -gc/stress \
   -gc/survivorAlignment/TestPromotionFromSurvivorToTenuredAfterMinorGC.java \
-  -gc/cms/TestMBeanCMS.java \
   -gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
   -gc/shenandoah \
   -gc/nvdimm
@@ -210,13 +209,11 @@
 
 tier1_gc_gcold = \
   gc/stress/gcold/TestGCOldWithG1.java \
-  gc/stress/gcold/TestGCOldWithCMS.java \
   gc/stress/gcold/TestGCOldWithSerial.java \
   gc/stress/gcold/TestGCOldWithParallel.java
 
 tier1_gc_gcbasher = \
   gc/stress/gcbasher/TestGCBasherWithG1.java \
-  gc/stress/gcbasher/TestGCBasherWithCMS.java \
   gc/stress/gcbasher/TestGCBasherWithSerial.java \
   gc/stress/gcbasher/TestGCBasherWithParallel.java
 
@@ -325,6 +322,7 @@
  -runtime/cds/appcds/javaldr/GCSharedStringsDuringDump.java \
  -runtime/cds/appcds/javaldr/HumongousDuringDump.java \
  -runtime/cds/appcds/sharedStrings \
+ -runtime/cds/appcds/ArchiveRelocationTest.java \
  -runtime/cds/appcds/DumpClassList.java \
  -runtime/cds/appcds/ExtraSymbols.java \
  -runtime/cds/appcds/LongClassListPath.java \
@@ -335,6 +333,15 @@
  -runtime/cds/appcds/UnusedCPDuringDump.java \
  -runtime/cds/appcds/VerifierTest_1B.java
 
+hotspot_cds_relocation = \
+  gc/g1/TestSharedArchiveWithPreTouch.java \
+  runtime/cds \
+  runtime/modules/ModulesSymLink.java \
+  runtime/modules/PatchModule/PatchModuleCDS.java \
+  runtime/modules/PatchModule/PatchModuleClassList.java \
+  runtime/NMT \
+  serviceability/sa
+
 # A subset of AppCDS tests to be run in tier1
 tier1_runtime_appcds = \
   runtime/cds/appcds/HelloTest.java \
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/c2/TestBitSetAndReset.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8214239
+ * @summary Missing x86_64.ad patterns for clearing and setting long vector bits
+ *
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
+ *                   -XX:-TieredCompilation -XX:CompileThreshold=1000
+ *                   -XX:CompileCommand=print,compiler/c2/TestBitSetAndReset.test*
+ *                   -XX:CompileCommand=compileonly,compiler/c2/TestBitSetAndReset.test*
+ *                   -XX:CompileCommand=dontinline,compiler/c2/TestBitSetAndReset.test*
+ *                   compiler.c2.TestBitSetAndReset
+ */
+
+package compiler.c2;
+
+public class TestBitSetAndReset {
+    private static final int COUNT = 10_000;
+
+    private static final long MASK63 = 0x8000_0000_0000_0000L;
+    private static final long MASK32 = 0x0000_0001_0000_0000L;
+    private static final long MASK31 = 0x0000_0000_8000_0000L;
+    private static final long MASK15 = 0x0000_0000_0000_8000L;
+    private static final long MASK00 = 0x0000_0000_0000_0001L;
+
+    private static long andq, orq;
+
+    public static void main(String... args) {
+        boolean success = true;
+
+        for (int i=0; i<COUNT; i++) {
+            andq = MASK63 | MASK31 | MASK15 | MASK00;
+            orq = 0;
+            test63();
+            test32();
+            test31();
+            test15();
+            test00();
+            success &= andq == 0 && orq == (MASK63 | MASK32 | MASK31 | MASK15 | MASK00);
+        }
+        if (!success)
+            throw new AssertionError("Failure while setting or clearing long vector bits!");
+    }
+
+    private static void test63() {
+        andq &= ~MASK63;
+        orq |= MASK63;
+    }
+    private static void test32() {
+        andq &= ~MASK32;
+        orq |= MASK32;
+    }
+    private static void test31() {
+        andq &= ~MASK31;
+        orq |= MASK31;
+    }
+    private static void test15() {
+        andq &= ~MASK15;
+        orq |= MASK15;
+    }
+    private static void test00() {
+        andq &= ~MASK00;
+        orq |= MASK00;
+    }
+}
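
Note: the test above exercises the single-bit shape that bug 8214239 is about — a long AND/OR with a constant mask that has exactly one bit set. A minimal standalone sketch of that idiom (class and variable names here are illustrative, not from the JDK sources):

    // A constant mask with exactly one bit set is the candidate shape for
    // the x86_64.ad clear/set patterns referenced by the @summary above.
    public class SingleBitSketch {
        public static void main(String[] args) {
            long v = 0;
            v |= (1L << 42);      // set bit 42 (single-bit constant mask)
            v &= ~(1L << 42);     // clear bit 42 again
            System.out.println(v == 0);   // prints true
        }
    }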
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatiles.java	Thu Nov 14 13:50:03 2019 +0000
@@ -36,8 +36,6 @@
  *                       TestUnsafeVolatileCAE,
  *                       TestUnsafeVolatileGAS}
  * and <testtype> in {G1,
- *                    CMS,
- *                    CMSCondMark,
  *                    Serial,
  *                    Parallel,
  *                    Shenandoah,
@@ -90,18 +88,6 @@
             procArgs = new String[argcount];
             procArgs[argcount - 2] = "-XX:+UseSerialGC";
             break;
-        case "CMS":
-            argcount = 10;
-            procArgs = new String[argcount];
-            procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC";
-            procArgs[argcount - 2] = "-XX:-UseCondCardMark";
-            break;
-        case "CMSCondMark":
-            argcount = 10;
-            procArgs = new String[argcount];
-            procArgs[argcount - 3] = "-XX:+UseConcMarkSweepGC";
-            procArgs[argcount - 2] = "-XX:+UseCondCardMark";
-            break;
         case "Shenandoah":
             argcount = 10;
             procArgs = new String[argcount];
@@ -340,36 +326,6 @@
                     "ret"
                 };
                 break;
-            case "CMSCondMark":
-                // a card mark volatile barrier should be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be elided
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "stlrw?" : "stlr",
-                    "membar_volatile",
-                    "dmb ish",
-                    "storestore \\(elided\\)",
-                    "strb",
-                    "membar_volatile \\(elided\\)",
-                    "ret"
-                };
-                break;
-            case "CMS":
-                // a volatile card mark membar should not be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be
-                // generated as "dmb ishst"
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "stlrw?" : "stlr",
-                    "storestore",
-                    "dmb ishst",
-                    "strb",
-                    "membar_volatile \\(elided\\)",
-                    "ret"
-                };
-                break;
             case "Shenandoah":
             case "ShenandoahTraversal":
                  // Shenandoah generates normal object graphs for
@@ -531,35 +487,6 @@
                     "ret"
                 };
                 break;
-            case "CMSCondMark":
-                // a card mark volatile barrier should be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be elided
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
-                    "membar_volatile",
-                    "dmb ish",
-                    "storestore \\(elided\\)",
-                    "strb",
-                    "membar_acquire \\(elided\\)",
-                    "ret"
-                };
-                break;
-            case "CMS":
-                // a volatile card mark membar should not be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be elided
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
-                    "storestore",
-                    "dmb ishst",
-                    "strb",
-                    "membar_acquire \\(elided\\)",
-                    "ret"
-                };
-                break;
             case "Shenandoah":
             case "ShenandoahTraversal":
                 // For volatile CAS, Shenandoah generates normal
@@ -736,35 +663,6 @@
                     "ret"
                 };
                 break;
-            case "CMSCondMark":
-                // a card mark volatile barrier should be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be elided
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
-                    "membar_volatile",
-                    "dmb ish",
-                    "storestore \\(elided\\)",
-                    "strb",
-                    "membar_acquire \\(elided\\)",
-                    "ret"
-                };
-                break;
-            case "CMS":
-                // a volatile card mark membar should not be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be elided
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "cmpxchgw?_acq" : "cmpxchg_acq",
-                    "storestore",
-                    "dmb ishst",
-                    "strb",
-                    "membar_acquire \\(elided\\)",
-                    "ret"
-                };
-                break;
             case "Shenandoah":
             case "ShenandoahTraversal":
                 // For volatile CAS, Shenandoah generates normal
@@ -921,35 +819,6 @@
                     "ret"
                 };
                 break;
-            case "CMSCondMark":
-                // a card mark volatile barrier should be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be elided
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
-                    "membar_volatile",
-                    "dmb ish",
-                    "storestore \\(elided\\)",
-                    "strb",
-                    "membar_acquire \\(elided\\)",
-                    "ret"
-                };
-                break;
-            case "CMS":
-                // a volatile card mark membar should not be generated
-                // before the card mark strb from the StoreCM and the
-                // storestore barrier from the StoreCM should be elided
-                matches = new String[] {
-                    "membar_release \\(elided\\)",
-                    useCompressedOops ? "atomic_xchgw?_acq" : "atomic_xchg_acq",
-                    "storestore",
-                    "dmb ishst",
-                    "strb",
-                    "membar_acquire \\(elided\\)",
-                    "ret"
-                };
-                break;
             case "Shenandoah":
             case "ShenandoahTraversal":
                 matches = new String[] {
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @summary C2 should use ldar, stlr and ldaxr+stlxr insns for volatile operations
- * @library /test/lib /
- *
- * @modules java.base/jdk.internal.misc
- *
- * @requires os.arch=="aarch64" & vm.debug == true &
- *           vm.flavor == "server" & !vm.graal.enabled &
- *           vm.gc.ConcMarkSweep
- *
- * @build compiler.c2.aarch64.TestVolatiles
- *        compiler.c2.aarch64.TestVolatileLoad
- *        compiler.c2.aarch64.TestUnsafeVolatileLoad
- *        compiler.c2.aarch64.TestVolatileStore
- *        compiler.c2.aarch64.TestUnsafeVolatileStore
- *        compiler.c2.aarch64.TestUnsafeVolatileCAS
- *        compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
- *        compiler.c2.aarch64.TestUnsafeVolatileCAE
- *        compiler.c2.aarch64.TestUnsafeVolatileGAS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestVolatileLoad CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestVolatileStore CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestUnsafeVolatileLoad CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestUnsafeVolatileStore CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestUnsafeVolatileCAS CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestUnsafeVolatileWeakCAS CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestUnsafeVolatileCAE CMS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMS
- *      TestUnsafeVolatileGAS CMS
- */
-
-package compiler.c2.aarch64;
-
-public class TestVolatilesCMS {
-    public static void main(String args[]) throws Throwable
-    {
-        // delegate work to shared code
-        new TestVolatiles().runtest(args[0], args[1]);
-    }
-}
--- a/test/hotspot/jtreg/compiler/c2/aarch64/TestVolatilesCMSCondMark.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/*
- * @test
- * @summary C2 should use ldar, stlr and ldaxr+stlxr insns for volatile operations
- * @library /test/lib /
- *
- * @modules java.base/jdk.internal.misc
- *
- * @requires os.arch=="aarch64" & vm.debug == true &
- *           vm.flavor == "server" & !vm.graal.enabled &
- *           vm.gc.ConcMarkSweep
- *
- * @build compiler.c2.aarch64.TestVolatiles
- *        compiler.c2.aarch64.TestVolatileLoad
- *        compiler.c2.aarch64.TestUnsafeVolatileLoad
- *        compiler.c2.aarch64.TestVolatileStore
- *        compiler.c2.aarch64.TestUnsafeVolatileStore
- *        compiler.c2.aarch64.TestUnsafeVolatileCAS
- *        compiler.c2.aarch64.TestUnsafeVolatileWeakCAS
- *        compiler.c2.aarch64.TestUnsafeVolatileCAE
- *        compiler.c2.aarch64.TestUnsafeVolatileGAS
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestVolatileLoad CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestVolatileStore CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestUnsafeVolatileLoad CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestUnsafeVolatileStore CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestUnsafeVolatileCAS CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestUnsafeVolatileWeakCAS CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestUnsafeVolatileCAE CMSCondMark
- *
- * @run driver compiler.c2.aarch64.TestVolatilesCMSCondMark
- *      TestUnsafeVolatileGAS CMSCondMark
- */
-
-package compiler.c2.aarch64;
-
-public class TestVolatilesCMSCondMark {
-    public static void main(String args[]) throws Throwable
-    {
-        // delegate work to shared code
-        new TestVolatiles().runtest(args[0], args[1]);
-    }
-}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/compilercontrol/CompilationModeHighOnlyTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2019, Loongson Technology Co. Ltd. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8233885
+ * @summary CompLevel_initial_compile should be CompLevel_full_optimization for high-only mode
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:CompilationMode=high-only
+ *                   compiler.compilercontrol.CompilationModeHighOnlyTest
+ *
+ */
+
+package compiler.compilercontrol;
+
+public class CompilationModeHighOnlyTest {
+    public static void main(String[] args) {
+        System.out.println("Passed");
+    }
+}
--- a/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/TestTranslatedException.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.hotspot.test/src/jdk/vm/ci/hotspot/test/TestTranslatedException.java	Thu Nov 14 13:50:03 2019 +0000
@@ -26,7 +26,6 @@
  * @requires vm.jvmci
  * @modules jdk.internal.vm.ci/jdk.vm.ci.hotspot:open
  * @library /compiler/jvmci/jdk.vm.ci.hotspot.test/src
- * @ignore 8233745
  * @run testng/othervm
  *      -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI -XX:-UseJVMCICompiler
  *      jdk.vm.ci.hotspot.test.TestTranslatedException
@@ -43,15 +42,6 @@
 import org.testng.annotations.Test;
 
 public class TestTranslatedException {
-
-    private static String printToString(Throwable throwable) {
-        ByteArrayOutputStream baos = new ByteArrayOutputStream();
-        try (PrintStream ps = new PrintStream(baos)) {
-            throwable.printStackTrace(ps);
-        }
-        return baos.toString();
-    }
-
     @SuppressWarnings("serial")
     public static class Untranslatable extends RuntimeException {
         public Untranslatable(String message, Throwable cause) {
@@ -74,15 +64,47 @@
         for (int i = 0; i < 10; i++) {
             throwable = new ExceptionInInitializerError(new InvocationTargetException(new RuntimeException(String.valueOf(i), throwable), "invoke"));
         }
-        String before = printToString(throwable);
         String encoding = (String) encode.invoke(null, throwable);
         Throwable decoded = (Throwable) decode.invoke(null, encoding);
-        String after = printToString(decoded);
+        assertThrowableEquals(throwable, decoded);
+    }
 
-        after = after.replace(
-                        "jdk.vm.ci.hotspot.TranslatedException: [java.lang.ClassNotFoundException: jdk/vm/ci/hotspot/test/TestTranslatedException$Untranslatable]",
-                        "jdk.vm.ci.hotspot.test.TestTranslatedException$Untranslatable: test exception");
-
-        Assert.assertEquals(before, after);
+    private static void assertThrowableEquals(Throwable original, Throwable decoded) {
+        try {
+            Assert.assertEquals(original == null, decoded == null);
+            while (original != null) {
+                if (Untranslatable.class.equals(original.getClass())) {
+                    Assert.assertEquals("jdk.vm.ci.hotspot.TranslatedException", decoded.getClass().getName());
+                    Assert.assertEquals("[java.lang.ClassNotFoundException: jdk/vm/ci/hotspot/test/TestTranslatedException$Untranslatable]", decoded.getMessage());
+                    Assert.assertEquals("test exception", original.getMessage());
+                } else {
+                    Assert.assertEquals(original.getClass().getName(), decoded.getClass().getName());
+                    Assert.assertEquals(original.getMessage(), decoded.getMessage());
+                }
+                StackTraceElement[] originalStack = original.getStackTrace();
+                StackTraceElement[] decodedStack = decoded.getStackTrace();
+                Assert.assertEquals(originalStack.length, decodedStack.length);
+                for (int i = 0, n = originalStack.length; i < n; ++i) {
+                    StackTraceElement originalStackElement = originalStack[i];
+                    StackTraceElement decodedStackElement = decodedStack[i];
+                    Assert.assertEquals(originalStackElement.getClassLoaderName(), decodedStackElement.getClassLoaderName());
+                    Assert.assertEquals(originalStackElement.getModuleName(), decodedStackElement.getModuleName());
+                    Assert.assertEquals(originalStackElement.getClassName(), decodedStackElement.getClassName());
+                    Assert.assertEquals(originalStackElement.getMethodName(), decodedStackElement.getMethodName());
+                    Assert.assertEquals(originalStackElement.getFileName(), decodedStackElement.getFileName());
+                    Assert.assertEquals(originalStackElement.getLineNumber(), decodedStackElement.getLineNumber());
+                }
+                original = original.getCause();
+                decoded = decoded.getCause();
+            }
+        } catch (AssertionError e) {
+            System.err.println("original:[");
+            original.printStackTrace(System.err);
+            System.err.println("]");
+            System.err.println("decoded:[");
+            decoded.printStackTrace(System.err);
+            System.err.println("]");
+            throw e;
+        }
     }
 }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/TestRemoveMainPostLoops.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8233529
+ * @summary Verify that correct loops are selected when trying to remove main/post.
+ * @run main/othervm -XX:-TieredCompilation -Xbatch
+ *                   -XX:CompileCommand=compileonly,compiler.loopopts.TestRemoveMainPostLoops::test
+ *                   compiler.loopopts.TestRemoveMainPostLoops
+ */
+
+package compiler.loopopts;
+
+public class TestRemoveMainPostLoops {
+    static int cnt1 = 0;
+    int cnt2 = 0;
+
+    void testCallee() {
+        // (5) Only main and post loops are created (no pre loop -> "PeelMainPost") and main is unrolled.
+        for (int i = 0; i < 100; ++i) {
+            // (4) Inner loop is fully unrolled and removed.
+            for (int j = 0; j < 10; ++j) {
+                cnt1 += j;
+            }
+        }
+    }
+
+    void test() {
+        for (int i = 0; i < 10_000; ++i) {
+            // (0) testCallee method is inlined
+            testCallee();
+            cnt2 = 0;
+            // (1) OSR compilation is triggered in this loop.
+            // (2) Pre-/main-/post loops are created.
+            // (3) Main and post loops found empty and removed.
+            // (6) Pre loop is found empty, attempt to remove main and post loop then incorrectly selects main from (5).
+            for (int j = 0; j < 10; ++j) {
+                cnt2 = cnt1 + j;
+            }
+        }
+    }
+
+    public static void main(String[] strArr) {
+        TestRemoveMainPostLoops test = new TestRemoveMainPostLoops();
+        for (int i = 0; i < 100; i++) {
+            cnt1 = 0;
+            test.cnt2 = 0;
+            test.test();
+            if (cnt1 != 45000000 || test.cnt2 != 45000009) {
+                throw new RuntimeException("Incorrect result: " + cnt1 + " " + test.cnt2);
+            }
+        }
+    }
+}
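
Note: the numbered comments in the test trace C2's pre-/main-/post-loop transformation of a counted loop. As a rough standalone illustration of that loop structure (the class name, the body() helper, the peel count, and the unroll factor of 4 are illustrative assumptions, not taken from the VM):

    public class PreMainPostSketch {
        static long sum;
        static void body(int i) { sum += i; }

        public static void main(String[] args) {
            int n = 103, pre = 3;                       // a few peeled iterations
            int mainLimit = pre + ((n - pre) / 4) * 4;  // bound for the unrolled main loop
            int i = 0;
            for (; i < pre; i++) body(i);               // pre-loop
            for (; i < mainLimit; i += 4) {             // main loop, unrolled by 4
                body(i); body(i + 1); body(i + 2); body(i + 3);
            }
            for (; i < n; i++) body(i);                 // post-loop handles the remainder
            System.out.println(sum == (long) n * (n - 1) / 2);  // prints true
        }
    }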
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/superword/AlignmentOnePack.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8229694
+ * @summary Tests the case where there is only 1 pack and no operations left when calling SuperWord::find_align_to_ref() to find the best alignment again.
+ *
+ * @run main/othervm -Xbatch -XX:CompileCommand=compileonly,compiler.loopopts.superword.AlignmentOnePack::test
+ *      compiler.loopopts.superword.AlignmentOnePack
+ */
+
+package compiler.loopopts.superword;
+
+public class AlignmentOnePack {
+    static int iFld;
+
+    public static void test(int[] intArr, short[] shortArr) {
+        for (int j = 8; j < intArr.length;j++) {
+            shortArr[10] = 10;
+            shortArr[j] = 30;
+            intArr[7] = 260;
+            intArr[j-1] = 400;
+            iFld = intArr[j];
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        int[] a = new int[16];
+        short[] c = new short[16];
+
+        for (int i = 0; i < 10000; i++) {
+            test(a, c);
+        }
+    }
+}
--- a/test/hotspot/jtreg/compiler/loopstripmining/CheckLoopStripMining.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/compiler/loopstripmining/CheckLoopStripMining.java	Thu Nov 14 13:50:03 2019 +0000
@@ -27,16 +27,38 @@
  * @summary C2: LoopStripMining doesn't strip as expected
  * @requires vm.compiler2.enabled
  *
- * @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+SafepointTimeout -XX:+SafepointALot
- *                   -XX:+AbortVMOnSafepointTimeout -XX:SafepointTimeoutDelay=500 -XX:GuaranteedSafepointInterval=500
- *                   -XX:-TieredCompilation -XX:+UseCountedLoopSafepoints -XX:LoopStripMiningIter=1000
- *                   -XX:LoopUnrollLimit=0 -XX:CompileCommand=compileonly,CheckLoopStripMining::test_loop -Xcomp CheckLoopStripMining
- *
+ * @library /test/lib
+ * @run driver compiler.loopstripmining.CheckLoopStripMining
  */
 
-public class CheckLoopStripMining {
+package compiler.loopstripmining;
+
+import jdk.test.lib.Utils;
+import jdk.test.lib.process.ProcessTools;
 
-  public static int test_loop(int x) {
+public class CheckLoopStripMining {
+  public static void main(String args[]) throws Exception {
+    ProcessTools.executeTestJvm(
+        "-XX:+UnlockDiagnosticVMOptions",
+        // to prevent biased locking handshakes from changing the timing of this test
+        "-XX:-UseBiasedLocking",
+        "-XX:+SafepointTimeout",
+        "-XX:+SafepointALot",
+        "-XX:+AbortVMOnSafepointTimeout",
+        "-XX:SafepointTimeoutDelay=" + Utils.adjustTimeout(500),
+        "-XX:GuaranteedSafepointInterval=" + Utils.adjustTimeout(500),
+        "-XX:-TieredCompilation",
+        "-XX:+UseCountedLoopSafepoints",
+        "-XX:LoopStripMiningIter=1000",
+        "-XX:LoopUnrollLimit=0",
+        "-XX:CompileCommand=compileonly,compiler.loopstripmining.CheckLoopStripMining$Test::test_loop",
+        "-Xcomp",
+        Test.class.getName()).shouldHaveExitValue(0)
+                             .stdoutShouldContain("sum: 715827882");
+  }
+
+  public static class Test {
+    public static int test_loop(int x) {
       int sum = 0;
       if (x != 0) {
           for (int y = 1; y < Integer.MAX_VALUE; ++y) {
@@ -44,10 +66,11 @@
           }
       }
       return sum;
-  }
+    }
 
-  public static void main(String args[]) {
-    int sum = test_loop(3);
-    System.out.println("sum: " + sum);
+    public static void main(String args[]) {
+      int sum = test_loop(3);
+      System.out.println("sum: " + sum);
+    }
   }
 }
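
Note: the rewritten test now launches the workload in a child JVM through jdk.test.lib, so the parent process pins down the exact flag set and asserts on the child's output. A minimal sketch of that driver pattern, assuming only that jdk.test.lib is on the test classpath as in the test above:

    import jdk.test.lib.process.ProcessTools;

    public class DriverSketch {
        public static void main(String[] args) throws Exception {
            // Launch a child test JVM with extra flags and check its output.
            ProcessTools.executeTestJvm("-XX:+PrintCommandLineFlags", "-version")
                        .shouldHaveExitValue(0);
        }
    }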
--- a/test/hotspot/jtreg/compiler/profiling/TestTypeProfiling.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/compiler/profiling/TestTypeProfiling.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,13 @@
   * @test
   * @bug 8189439
   * @summary Parameters type profiling is not performed from aarch64 interpreter
+  *
   * @requires os.arch != "arm"
   * @requires vm.flavor == "server" & vm.compMode == "Xmixed" & !vm.emulatedClient & !vm.graal.enabled
+  *
+  * @comment the test can't be run w/ TieredStopAtLevel < 4
+  * @requires vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4
+  *
   * @library /test/lib /
   * @build sun.hotspot.WhiteBox
   * @run driver ClassFileInstaller sun.hotspot.WhiteBox sun.hotspot.WhiteBox$WhiteBoxPermission
--- a/test/hotspot/jtreg/compiler/tiered/Level2RecompilationTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/compiler/tiered/Level2RecompilationTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -28,6 +28,9 @@
  * @modules java.base/jdk.internal.misc
  *          java.management
  *
+ * @comment the test can't be run w/ TieredStopAtLevel < 4
+ * @requires vm.flavor == "server" & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4)
+ *
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox
  *                                sun.hotspot.WhiteBox$WhiteBoxPermission
--- a/test/hotspot/jtreg/compiler/whitebox/OSRFailureLevel4Test.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/compiler/whitebox/OSRFailureLevel4Test.java	Thu Nov 14 13:50:03 2019 +0000
@@ -27,6 +27,10 @@
  * @library /test/lib /
  * @modules java.base/jdk.internal.misc
  *          java.management
+ *
+ * @comment the test can't be run w/ TieredStopAtLevel < 4
+ * @requires vm.flavor == "server" & (vm.opt.TieredStopAtLevel == null | vm.opt.TieredStopAtLevel == 4)
+ *
  * @build sun.hotspot.WhiteBox
  * @run driver ClassFileInstaller sun.hotspot.WhiteBox
  *                                sun.hotspot.WhiteBox$WhiteBoxPermission
--- a/test/hotspot/jtreg/gc/TestAgeOutput.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestAgeOutput.java	Thu Nov 14 13:50:03 2019 +0000
@@ -48,19 +48,6 @@
  * @run main/othervm -XX:+UseG1GC gc.TestAgeOutput UseG1GC
  */
 
-/*
- * @test TestAgeOutputCMS
- * @bug 8164936
- * @key gc
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @modules java.base/jdk.internal.misc
- * @library /test/lib
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestAgeOutput UseConcMarkSweepGC
- */
-
 import sun.hotspot.WhiteBox;
 
 import java.util.regex.Matcher;
--- a/test/hotspot/jtreg/gc/TestFullGCCount.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestFullGCCount.java	Thu Nov 14 13:50:03 2019 +0000
@@ -26,8 +26,7 @@
 /**
  * @test TestFullGCCount.java
  * @bug 7072527
- * @summary CMS: JMM GC counters overcount in some cases
- * @requires !(vm.gc == "ConcMarkSweep" & vm.opt.ExplicitGCInvokesConcurrent == true)
+ * @summary JMM GC counters overcount in some cases
  * @comment Shenandoah has "ExplicitGCInvokesConcurrent" on by default
  * @requires !(vm.gc == "Shenandoah"    & vm.opt.ExplicitGCInvokesConcurrent != false)
  * @modules java.management
@@ -41,7 +40,7 @@
 import java.util.List;
 
 /*
- * Originally for a specific failure in CMS, this test now monitors all
+ * Originally for a specific failure in CMS, this test now monitors all
  * collectors for double-counting of collections.
  */
 public class TestFullGCCount {
--- a/test/hotspot/jtreg/gc/TestGenerationPerfCounter.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestGenerationPerfCounter.java	Thu Nov 14 13:50:03 2019 +0000
@@ -63,19 +63,6 @@
  * @run main/othervm -XX:+UsePerfData -XX:+UseG1GC gc.TestGenerationPerfCounter
  */
 
-/* @test TestGenerationPerfCounterCMS
- * @bug 8080345
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib /
- * @summary Tests that the sun.gc.policy.generations returns 2 for all GCs.
- * @modules java.base/jdk.internal.misc
- *          java.compiler
- *          java.management/sun.management
- *          jdk.internal.jvmstat/sun.jvmstat.monitor
- * @run main/othervm -XX:+UsePerfData -XX:+UseConcMarkSweepGC gc.TestGenerationPerfCounter
- */
-
 public class TestGenerationPerfCounter {
     public static void main(String[] args) throws Exception {
         long numGenerations =
--- a/test/hotspot/jtreg/gc/TestMemoryInitializationWithCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc;
-
-/*
- * @test TestMemoryInitializationWithCMS
- * @key gc
- * @bug 4668531
- * @library /
- * @requires vm.debug & vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Simple test for -XX:+CheckMemoryInitialization doesn't crash VM
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+CheckMemoryInitialization gc.TestMemoryInitializationWithCMS
- */
-
-public class TestMemoryInitializationWithCMS {
-
-    public static void main(String args[]) {
-        TestMemoryInitialization.main(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/TestMemoryMXBeansAndPoolsPresence.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestMemoryMXBeansAndPoolsPresence.java	Thu Nov 14 13:50:03 2019 +0000
@@ -58,16 +58,6 @@
  * @run main/othervm -XX:+UseSerialGC gc.TestMemoryMXBeansAndPoolsPresence Serial
  */
 
-/* @test TestMemoryMXBeansAndPoolsPresenceCMS
- * @bug 8191564
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestMemoryMXBeansAndPoolsPresence CMS
- */
-
 class GCBeanDescription {
     public String name;
     public String[] poolNames;
@@ -108,10 +98,6 @@
                 test(new GCBeanDescription("G1 Young Generation", new String[] {"G1 Eden Space", "G1 Survivor Space", "G1 Old Gen"}),
                      new GCBeanDescription("G1 Old Generation",   new String[] {"G1 Eden Space", "G1 Survivor Space", "G1 Old Gen"}));
                 break;
-            case "CMS":
-                test(new GCBeanDescription("ParNew",              new String[] {"Par Eden Space", "Par Survivor Space"}),
-                     new GCBeanDescription("ConcurrentMarkSweep", new String[] {"Par Eden Space", "Par Survivor Space", "CMS Old Gen"}));
-                break;
             case "Parallel":
                 test(new GCBeanDescription("PS Scavenge",         new String[] {"PS Eden Space", "PS Survivor Space"}),
                      new GCBeanDescription("PS MarkSweep",        new String[] {"PS Eden Space", "PS Survivor Space", "PS Old Gen"}));
--- a/test/hotspot/jtreg/gc/TestNumWorkerOutput.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestNumWorkerOutput.java	Thu Nov 14 13:50:03 2019 +0000
@@ -36,19 +36,6 @@
  * @run main/othervm -XX:+UseG1GC gc.TestNumWorkerOutput UseG1GC
  */
 
-/*
- * @test TestNumWorkerOutputCMS
- * @bug 8165292
- * @key gc
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @modules java.base/jdk.internal.misc
- * @library /test/lib
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestNumWorkerOutput UseConcMarkSweepGC
- */
-
 import sun.hotspot.WhiteBox;
 
 import java.util.regex.Matcher;
--- a/test/hotspot/jtreg/gc/TestPolicyNamePerfCounter.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestPolicyNamePerfCounter.java	Thu Nov 14 13:50:03 2019 +0000
@@ -63,19 +63,6 @@
  * @run main/othervm -XX:+UsePerfData -XX:+UseG1GC gc.TestPolicyNamePerfCounter GarbageFirst
  */
 
-/* @test TestPolicyNamePerfCounterCMS
- * @bug 8210192
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib /
- * @summary Tests that sun.gc.policy.name returns expected values for different GCs.
- * @modules java.base/jdk.internal.misc
- *          java.compiler
- *          java.management/sun.management
- *          jdk.internal.jvmstat/sun.jvmstat.monitor
- * @run main/othervm -XX:+UsePerfData -XX:+UseConcMarkSweepGC gc.TestPolicyNamePerfCounter ParNew:CMS
- */
-
 public class TestPolicyNamePerfCounter {
     public static void main(String[] args) throws Exception {
         if (args.length != 1) {
--- a/test/hotspot/jtreg/gc/TestSmallHeap.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestSmallHeap.java	Thu Nov 14 13:50:03 2019 +0000
@@ -91,12 +91,8 @@
             noneGCSupported = false;
             verifySmallHeapSize("-XX:+UseG1GC", expectedMaxHeap);
         }
-        if (GC.ConcMarkSweep.isSupported()) {
-            noneGCSupported = false;
-            verifySmallHeapSize("-XX:+UseConcMarkSweepGC", expectedMaxHeap);
-        }
         if (noneGCSupported) {
-            throw new SkippedException("Skipping test because none of Parallel/Serial/G1/ConcMarkSweep is supported.");
+            throw new SkippedException("Skipping test because none of Parallel/Serial/G1 is supported.");
         }
     }
 
--- a/test/hotspot/jtreg/gc/TestSystemGC.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/TestSystemGC.java	Thu Nov 14 13:50:03 2019 +0000
@@ -52,15 +52,6 @@
  */
 
 /*
- * @test TestSystemGCCMS
- * @key gc
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @run main/othervm -XX:+UseConcMarkSweepGC gc.TestSystemGC
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent gc.TestSystemGC
- */
-
-/*
  * @test TestSystemGCShenandoah
  * @key gc
  * @requires vm.gc.Shenandoah & !vm.graal.enabled
--- a/test/hotspot/jtreg/gc/arguments/GCTypes.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/GCTypes.java	Thu Nov 14 13:50:03 2019 +0000
@@ -70,7 +70,6 @@
 
     public static enum YoungGCType implements GCType {
         DefNew("Copy"),
-        ParNew("ParNew"),
         PSNew("PS Scavenge"),
         G1("G1 Young Generation");
 
@@ -95,7 +94,6 @@
 
     public static enum OldGCType implements GCType {
         Serial("MarkSweepCompact"),
-        CMS("ConcurrentMarkSweep"),
         PSOld("PS MarkSweep"),
         G1("G1 Old Generation");
 
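
Note: GCTypes resolves the active collector by matching GarbageCollectorMXBean names ("Copy", "PS Scavenge", "G1 Young Generation", and so on), which is why the ParNew and ConcurrentMarkSweep entries disappear along with the collector. Those bean names can be listed with nothing beyond java.lang.management (sketch; the class name is illustrative):

    import java.lang.management.GarbageCollectorMXBean;
    import java.lang.management.ManagementFactory;

    public class GCBeanNamesProbe {
        public static void main(String[] args) {
            // Print the collector bean names that GCTypes matches against.
            for (GarbageCollectorMXBean bean : ManagementFactory.getGarbageCollectorMXBeans()) {
                System.out.println(bean.getName());
            }
        }
    }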
--- a/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestAlignmentToUseLargePages.java	Thu Nov 14 13:50:03 2019 +0000
@@ -54,16 +54,6 @@
  */
 
 /**
- * @test TestAlignmentToUseLargePagesCMS
- * @key gc regression
- * @bug 8024396
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @run main/othervm -Xms71M -Xmx91M -XX:+UseConcMarkSweepGC -XX:+UseLargePages gc.arguments.TestAlignmentToUseLargePages
- * @run main/othervm -Xms71M -Xmx91M -XX:+UseConcMarkSweepGC -XX:-UseLargePages gc.arguments.TestAlignmentToUseLargePages
- */
-
-/**
  * @test TestAlignmentToUseLargePagesShenandoah
  * @key gc
  * @bug 8024396
--- a/test/hotspot/jtreg/gc/arguments/TestCMSHeapSizeFlags.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.arguments;
-
-/*
- * @test TestCMSHeapSizeFlags
- * @key gc
- * @bug 8006088
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Tests argument processing for initial and maximum heap size for the CMS collector
- * @library /test/lib
- * @library /
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm gc.arguments.TestCMSHeapSizeFlags
- * @author thomas.schatzl@oracle.com
- */
-
-public class TestCMSHeapSizeFlags {
-
-  public static void main(String args[]) throws Exception {
-    final String gcName = "-XX:+UseConcMarkSweepGC";
-
-    TestMaxHeapSizeTools.checkMinInitialMaxHeapFlags(gcName);
-
-    TestMaxHeapSizeTools.checkGenMaxHeapErgo(gcName);
-  }
-}
-
--- a/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestDisableDefaultGC.java	Thu Nov 14 13:50:03 2019 +0000
@@ -36,7 +36,6 @@
  * @run driver gc.arguments.TestDisableDefaultGC
  */
 
-import jdk.test.lib.process.ProcessTools;
 import jdk.test.lib.process.OutputAnalyzer;
 
 public class TestDisableDefaultGC {
@@ -45,7 +44,6 @@
         ProcessBuilder pb = GCArguments.createJavaProcessBuilder("-XX:-UseSerialGC",
                                                                  "-XX:-UseParallelGC",
                                                                  "-XX:-UseG1GC",
-                                                                 "-XX:-UseConcMarkSweepGC",
                                                                  "-XX:+UnlockExperimentalVMOptions",
                                                                  "-XX:-UseShenandoahGC",
                                                                  "-XX:-UseZGC",
--- a/test/hotspot/jtreg/gc/arguments/TestMaxNewSize.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestMaxNewSize.java	Thu Nov 14 13:50:03 2019 +0000
@@ -68,19 +68,6 @@
  * @author thomas.schatzl@oracle.com, jesper.wilhelmsson@oracle.com
  */
 
-/*
- * @test TestMaxNewSizeCMS
- * @key gc
- * @bug 7057939
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @library /
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @run main gc.arguments.TestMaxNewSize -XX:+UseConcMarkSweepGC
- */
-
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
--- a/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestNewRatioFlag.java	Thu Nov 14 13:50:03 2019 +0000
@@ -101,7 +101,6 @@
             int expectedRatio = Integer.valueOf(args[0]);
             switch (GCTypes.YoungGCType.getYoungGCType()) {
                 case DefNew:
-                case ParNew:
                     verifyDefNewNewRatio(expectedRatio);
                     break;
                 case PSNew:
--- a/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestNewSizeFlags.java	Thu Nov 14 13:50:03 2019 +0000
@@ -306,7 +306,6 @@
         public static long alignGenSize(long value) {
             switch (YOUNG_GC_TYPE) {
                 case DefNew:
-                case ParNew:
                     return HeapRegionUsageTool.alignDown(value, HEAP_SPACE_ALIGNMENT);
                 case PSNew:
                     return HeapRegionUsageTool.alignUp(HeapRegionUsageTool.alignDown(value,
--- a/test/hotspot/jtreg/gc/arguments/TestParallelGCThreads.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestParallelGCThreads.java	Thu Nov 14 13:50:03 2019 +0000
@@ -80,7 +80,7 @@
   }
 
   public static void testFlags() throws Exception {
-    // For each parallel collector (G1, Parallel, ParNew/CMS)
+    // For each parallel collector (G1, Parallel)
     List<String> supportedGC = new ArrayList<String>();
 
     if (GC.G1.isSupported()) {
@@ -89,12 +89,9 @@
     if (GC.Parallel.isSupported()) {
       supportedGC.add("Parallel");
     }
-    if (GC.ConcMarkSweep.isSupported()) {
-      supportedGC.add("ConcMarkSweep");
-    }
 
     if (supportedGC.isEmpty()) {
-      throw new SkippedException("Skipping test because none of G1/Parallel/ConcMarkSweep is supported.");
+      throw new SkippedException("Skipping test because none of G1/Parallel is supported.");
     }
 
     for (String gc : supportedGC) {
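
Note: these argument tests probe build-time collector support through the test library's GC enum before exercising a flag combination. A minimal sketch of that probe, assuming sun.hotspot.gc.GC and the WhiteBox API are set up as in the @run lines of the tests above (class name illustrative):

    import sun.hotspot.gc.GC;

    public class SupportedGCProbe {
        public static void main(String[] args) {
            // Requires the WhiteBox API on the boot classpath.
            for (GC gc : GC.values()) {
                System.out.println(gc + " supported: " + gc.isSupported());
            }
        }
    }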
--- a/test/hotspot/jtreg/gc/arguments/TestParallelRefProc.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestParallelRefProc.java	Thu Nov 14 13:50:03 2019 +0000
@@ -51,10 +51,6 @@
             noneGCSupported = false;
             testFlag(new String[] { "-XX:+UseSerialGC" }, false);
         }
-        if (GC.ConcMarkSweep.isSupported()) {
-            noneGCSupported = false;
-            testFlag(new String[] { "-XX:+UseConcMarkSweepGC" }, false);
-        }
         if (GC.Parallel.isSupported()) {
             noneGCSupported = false;
             testFlag(new String[] { "-XX:+UseParallelGC" }, false);
@@ -66,7 +62,7 @@
             testFlag(new String[] { "-XX:+UseG1GC", "-XX:-ParallelRefProcEnabled", "-XX:ParallelGCThreads=2" }, false);
         }
         if (noneGCSupported) {
-            throw new SkippedException("Skipping test because none of Serial/ConcMarkSweep/Parallel/G1 is supported.");
+            throw new SkippedException("Skipping test because none of Serial/Parallel/G1 is supported.");
         }
     }
 
--- a/test/hotspot/jtreg/gc/arguments/TestSelectDefaultGC.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestSelectDefaultGC.java	Thu Nov 14 13:50:03 2019 +0000
@@ -64,8 +64,6 @@
         assertVMOption(output, "UseG1GC",            isServer);
         // Serial is default for non-server class machines
         assertVMOption(output, "UseSerialGC",        !isServer);
-        // CMS is never default
-        assertVMOption(output, "UseConcMarkSweepGC", false);
     }
 
     public static void main(String[] args) throws Exception {
--- a/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestSurvivorRatioFlag.java	Thu Nov 14 13:50:03 2019 +0000
@@ -126,7 +126,6 @@
             GCTypes.YoungGCType type = GCTypes.YoungGCType.getYoungGCType();
             switch (type) {
                 case DefNew:
-                case ParNew:
                     verifyDefNewSurvivorRatio(expectedRatio);
                     break;
                 case PSNew:
--- a/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/arguments/TestUseCompressedOopsErgo.java	Thu Nov 14 13:50:03 2019 +0000
@@ -73,22 +73,6 @@
  */
 
 /*
- * @test TestUseCompressedOopsErgoCMS
- * @key gc
- * @bug 8010722
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @library /
- * @modules java.base/jdk.internal.misc
- *          java.management/sun.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm gc.arguments.TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
- */
-
-/*
  * @test TestUseCompressedOopsErgoShenandoah
  * @key gc
  * @bug 8010722
--- a/test/hotspot/jtreg/gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.class_unloading;
-
-/*
- * @test
- * @key gc
- * @bug 8049831
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run driver gc.class_unloading.TestCMSClassUnloadingEnabledHWM
- * @summary Test that -XX:-CMSClassUnloadingEnabled will trigger a Full GC when more than MetaspaceSize metadata is allocated.
- */
-
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import sun.hotspot.WhiteBox;
-
-public class TestCMSClassUnloadingEnabledHWM {
-  private static long MetaspaceSize = 32 * 1024 * 1024;
-  private static long YoungGenSize  = 32 * 1024 * 1024;
-
-  private static OutputAnalyzer run(boolean enableUnloading) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-      "-Xbootclasspath/a:.",
-      "-XX:+UnlockDiagnosticVMOptions",
-      "-XX:+WhiteBoxAPI",
-      "-Xmx128m",
-      "-XX:CMSMaxAbortablePrecleanTime=1",
-      "-XX:CMSWaitDuration=50",
-      "-XX:MetaspaceSize=" + MetaspaceSize,
-      "-Xmn" + YoungGenSize,
-      "-XX:+UseConcMarkSweepGC",
-      "-XX:" + (enableUnloading ? "+" : "-") + "CMSClassUnloadingEnabled",
-      "-Xlog:gc",
-      TestCMSClassUnloadingEnabledHWM.AllocateBeyondMetaspaceSize.class.getName(),
-      "" + MetaspaceSize);
-    return new OutputAnalyzer(pb.start());
-  }
-
-  public static OutputAnalyzer runWithCMSClassUnloading() throws Exception {
-    return run(true);
-  }
-
-  public static OutputAnalyzer runWithoutCMSClassUnloading() throws Exception {
-    return run(false);
-  }
-
-  public static void testWithoutCMSClassUnloading() throws Exception {
-    // -XX:-CMSClassUnloadingEnabled is used, so we expect a full GC instead of a concurrent cycle.
-    OutputAnalyzer out = runWithoutCMSClassUnloading();
-
-    out.shouldMatch(".*Pause Full.*");
-    out.shouldNotMatch(".*Pause Initial Mark.*");
-  }
-
-  public static void testWithCMSClassUnloading() throws Exception {
-    // -XX:+CMSClassUnloadingEnabled is used, so we expect a concurrent cycle instead of a full GC.
-    OutputAnalyzer out = runWithCMSClassUnloading();
-
-    out.shouldMatch(".*Pause Initial Mark.*");
-    out.shouldNotMatch(".*Pause Full.*");
-  }
-
-  public static void main(String args[]) throws Exception {
-    testWithCMSClassUnloading();
-    testWithoutCMSClassUnloading();
-  }
-
-  public static class AllocateBeyondMetaspaceSize {
-    public static void main(String [] args) throws Exception {
-      if (args.length != 1) {
-        throw new IllegalArgumentException("Usage: <MetaspaceSize>");
-      }
-
-      WhiteBox wb = WhiteBox.getWhiteBox();
-
-      // Allocate past the MetaspaceSize limit.
-      long metaspaceSize = Long.parseLong(args[0]);
-      long allocationBeyondMetaspaceSize  = metaspaceSize * 2;
-      long metaspace = wb.allocateMetaspace(null, allocationBeyondMetaspaceSize);
-
-      // Wait for at least one GC to occur. The caller will parse the log files produced.
-      GarbageCollectorMXBean cmsGCBean = getCMSGCBean();
-      while (cmsGCBean.getCollectionCount() == 0) {
-        Thread.sleep(100);
-      }
-
-      wb.freeMetaspace(null, metaspace, metaspace);
-    }
-
-    private static GarbageCollectorMXBean getCMSGCBean() {
-      for (GarbageCollectorMXBean gcBean : ManagementFactory.getGarbageCollectorMXBeans()) {
-        if (gcBean.getObjectName().toString().equals("java.lang:type=GarbageCollector,name=ConcurrentMarkSweep")) {
-          return gcBean;
-        }
-      }
-      return null;
-    }
-  }
-}
-
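The driver above waits for the first CMS cycle by polling the collector's GarbageCollectorMXBean. The same polling idiom works against any collector; a minimal standalone sketch follows, assuming a post-CMS JDK where the young-generation bean is named "G1 Young Generation" (pass the name your JVM actually reports if it differs):

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;

public class WaitForFirstGC {
    public static void main(String[] args) throws InterruptedException {
        String wanted = args.length > 0 ? args[0] : "G1 Young Generation";
        GarbageCollectorMXBean bean = ManagementFactory.getGarbageCollectorMXBeans()
                .stream()
                .filter(b -> b.getName().equals(wanted))
                .findFirst()
                .orElseThrow(() -> new IllegalStateException("no collector named " + wanted));
        // Poll until at least one collection has been recorded, as the deleted
        // test did for the ConcurrentMarkSweep bean.
        while (bean.getCollectionCount() == 0) {
            System.gc(); // encourage a collection; the deleted test allocated metaspace instead
            Thread.sleep(100);
        }
        System.out.println(bean.getName() + " ran " + bean.getCollectionCount() + " time(s)");
    }
}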
--- a/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/class_unloading/TestClassUnloadingDisabled.java	Thu Nov 14 13:50:03 2019 +0000
@@ -81,24 +81,6 @@
  */
 
 /*
- * @test TestClassUnloadingDisabledCMS
- * @key gc
- * @bug 8114823
- * @comment Graal does not support CMS
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @requires vm.opt.ClassUnloading != true
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- *                              sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- *                   -XX:-ClassUnloading -XX:+UseConcMarkSweepGC gc.class_unloading.TestClassUnloadingDisabled
- */
-
-/*
  * @test TestClassUnloadingDisabledShenandoah
  * @key gc
  * @bug 8114823
--- a/test/hotspot/jtreg/gc/cms/DisableResizePLAB.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-import static java.lang.ref.Reference.reachabilityFence;
-
-/*
- * @test DisableResizePLAB
- * @key gc
- * @bug 8060467
- * @author filipp.zhinkin@oracle.com, john.coomes@oracle.com
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Run CMS with PLAB resizing disabled and a small OldPLABSize
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:-ResizePLAB -XX:OldPLABSize=1k -Xmx256m -Xlog:gc=debug gc.cms.DisableResizePLAB
- */
-
-public class DisableResizePLAB {
-    public static void main(String args[]) throws Exception {
-        Object garbage[] = new Object[1_000];
-        for (int i = 0; i < garbage.length; i++) {
-            garbage[i] = new byte[0];
-        }
-        long startTime = System.currentTimeMillis();
-        while (System.currentTimeMillis() - startTime < 10_000) {
-            reachabilityFence(new byte[1024]);
-        }
-    }
-}
--- a/test/hotspot/jtreg/gc/cms/GuardShrinkWarning.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/**
- * @test GuardShrinkWarning
- * @key gc regression
- * @summary Remove warning about CMS generation shrinking.
- * @bug 8012111
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @run main/othervm gc.cms.GuardShrinkWarning
- * @author jon.masamitsu@oracle.com
- */
-
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.process.ProcessTools;
-
-public class GuardShrinkWarning {
-  public static void main(String args[]) throws Exception {
-
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
-      "-showversion",
-      "-XX:+UseConcMarkSweepGC",
-      "-XX:+ExplicitGCInvokesConcurrent",
-      SystemGCCaller.class.getName()
-      );
-
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-
-    output.shouldNotContain("Shrinking of CMS not yet implemented");
-
-    output.shouldNotContain("error");
-
-    output.shouldHaveExitValue(0);
-  }
-  static class SystemGCCaller {
-    public static void main(String [] args) {
-      System.gc();
-    }
-  }
-}
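GuardShrinkWarning is the simplest instance of the fork-and-scan pattern shared by most tests in this changeset: start a child JVM with ProcessTools, wrap its output in an OutputAnalyzer, and assert on the text. A minimal sketch, assuming the jtreg test library (/test/lib) is on the classpath:

import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;

public class ForkAndScan {
    public static void main(String[] args) throws Exception {
        // Fork a child JVM; any flags under test go here.
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xlog:gc", "-version");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldHaveExitValue(0);     // child must exit cleanly
        output.shouldNotContain("error");  // the same negative check used above
    }
}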
--- a/test/hotspot/jtreg/gc/cms/TestBubbleUpRef.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-import java.lang.ref.ReferenceQueue;
-import java.lang.ref.WeakReference;
-import java.util.LinkedList;
-import java.util.ListIterator;
-
-/*
- * @test
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @key cte_test
- * @bug 4950157
- * @summary Stress the behavior of ergonomics when the heap is nearly full and
- *          stays nearly full.
- * @run main/othervm
- *  -XX:+UseConcMarkSweepGC -XX:-CMSYield -XX:-CMSPrecleanRefLists1
- *  -XX:CMSInitiatingOccupancyFraction=0 -Xmx80m gc.cms.TestBubbleUpRef 16000 50 10000
- */
-
-/**
- * Test program to stress the behavior of ergonomics when the
- * heap is nearly full and stays nearly full.
- * This is a test to catch references that have been discovered
- * during concurrent marking and whose referents have been
- * cleared by the mutator.
- * Allocate objects with weak references until the heap is full
- * Free the objects.
- * Do work so that concurrent marking has a chance to work
- * Clear the referents out of the weak references
- * Call System.gc() in the hope that it will trigger the collection
- * Free the weak references
- * Do it again.
- *
- * Use the following VM options
- *     -Xmx80m -XX:-CMSYield [-XX:+UseConcMarkSweepGC] -XX:-CMSPrecleanRefLists1
- *      -XX:CMSInitiatingOccupancyFraction=0
- *
- * Use parameter:
- *     args[0] - array size  (16000)
- *     args[1] - iterations  (50)
- *     args[2] - work        (10000)
- */
-class MyList extends LinkedList {
-
-    int[] a;
-
-    MyList(int size) {
-        a = new int[size];
-    }
-}
-
-class MyRefList extends LinkedList {
-
-    WeakReference ref;
-
-    MyRefList(Object o, ReferenceQueue rq) {
-        ref = new WeakReference(o, rq);
-    }
-
-    void clearReferent() {
-        ref.clear();
-    }
-}
-
-public class TestBubbleUpRef {
-
-    MyList list;
-    MyRefList refList;
-    ReferenceQueue rq;
-    int refListLen;
-    int arraySize;
-    int iterations;
-    int workUnits;
-
-    TestBubbleUpRef(int as, int cnt, int wk) {
-        arraySize = as;
-        iterations = cnt;
-        workUnits = wk;
-        list = new MyList(arraySize);
-        refList = new MyRefList(list, rq);
-    }
-
-    public void fill() {
-        System.out.println("fill() " + iterations + " times");
-        int count = 0;
-        while (true) {
-            try {
-                // Allocations
-                MyList next = new MyList(arraySize);
-                list.add(next);
-                MyRefList nextRef = new MyRefList(next, rq);
-                refList.add(nextRef);
-            } catch (OutOfMemoryError e) {
-                // When the heap is full
-                try {
-                    if (count++ > iterations) {
-                        return;
-                    }
-                    System.out.println("Freeing list");
-                    while (!list.isEmpty()) {
-                        list.removeFirst();
-                    }
-                    System.out.println("Doing work");
-                    int j = 0;
-                    for (int i = 1; i < workUnits; i++) {
-                        j = j + i;
-                    }
-                    System.out.println("Clearing refs");
-                    ListIterator listIt = refList.listIterator();
-                    while (listIt.hasNext()) {
-                        MyRefList next = (MyRefList) listIt.next();
-                        next.clearReferent();
-                    }
-                    System.gc();
-                    System.out.println("Freeing refs");
-                    while (!refList.isEmpty()) {
-                        refList.removeFirst();
-                    }
-                } catch (OutOfMemoryError e2) {
-                    System.err.println("Out of Memory - 2 ");
-                    continue;
-                }
-            } catch (Exception e) {
-                System.err.println("Unexpected exception: " + e);
-                return;
-            }
-        }
-    }
-
-    /**
-     * Test entry point.
-     *     args[0] - array size  (is the size of the int array in a list item)
-     *     args[1] - iterations  (is the number of out-of-memory exceptions before exit)
-     *     args[2] - work        (is the work done between allocations)
-     * @param args
-     */
-    public static void main(String[] args) {
-        // Get the input parameters.
-        if (args.length != 3) {
-            throw new IllegalArgumentException("Wrong number of input argumets");
-        }
-
-        int as = Integer.parseInt(args[0]);
-        int cnt = Integer.parseInt(args[1]);
-        int work = Integer.parseInt(args[2]);
-
-        System.out.println("<array size> " + as + "\n"
-                + "<OOM's> " + cnt + "\n"
-                + "<work units> " + work + "\n");
-
-        // Initialization
-        TestBubbleUpRef b = new TestBubbleUpRef(as, cnt, work);
-
-        // Run the test
-        try {
-            b.fill();
-        } catch (OutOfMemoryError e) {
-            b = null; // Free memory before trying to print anything
-            System.err.println("Out of Memory - exiting ");
-        } catch (Exception e) {
-            System.err.println("Exiting ");
-        }
-    }
-}
-
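The race TestBubbleUpRef targeted hinges on WeakReference.clear(): a referent cleared by the mutator (rather than by the collector) must not be enqueued, even if concurrent marking had already discovered the reference. A minimal sketch of that lifecycle, with illustrative names not taken from the test:

import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;

public class MutatorClearDemo {
    public static void main(String[] args) throws InterruptedException {
        ReferenceQueue<int[]> rq = new ReferenceQueue<>();
        int[] referent = new int[16_000];
        WeakReference<int[]> ref = new WeakReference<>(referent, rq);

        referent = null;  // drop the strong reference
        ref.clear();      // mutator clears the referent, as clearReferent() did
        System.gc();
        // A reference cleared by the program is never enqueued by the GC,
        // so this remove() is expected to time out and print "enqueued: false".
        System.out.println("enqueued: " + (rq.remove(1000) != null));
    }
}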
--- a/test/hotspot/jtreg/gc/cms/TestCMSScavengeBeforeRemark.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/*
- * @test TestCMSScavengeBeforeRemark
- * @key gc
- * @bug 8139868
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Run CMS with CMSScavengeBeforeRemark
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+CMSScavengeBeforeRemark -XX:+ExplicitGCInvokesConcurrent -Xmx256m -Xlog:gc=debug gc.cms.TestCMSScavengeBeforeRemark
- */
-
-public class TestCMSScavengeBeforeRemark {
-    public static void main(String args[]) throws Exception {
-        System.gc();
-    }
-}
--- a/test/hotspot/jtreg/gc/cms/TestCriticalPriority.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/*
- * @test TestCriticalPriority
- * @key gc
- * @bug 8217378
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Test critical priority is accepted
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:+UnlockExperimentalVMOptions -XX:+UseCriticalCMSThreadPriority gc.cms.TestCriticalPriority
- */
-
-public class TestCriticalPriority {
-    public static void main(String args[]) throws Exception {
-        // The failure would be detected before entering main().
-    }
-}
--- a/test/hotspot/jtreg/gc/cms/TestMBeanCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.cms;
-
-/*
- * @test TestMBeanCMS.java
- * @bug 6581734
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary CMS Old Gen's collection usage is zero after GC which is incorrect
- * @modules java.management
- * @run main/othervm -Xmx512m -verbose:gc -XX:+UseConcMarkSweepGC gc.cms.TestMBeanCMS
- *
- */
-
-import java.lang.management.GarbageCollectorMXBean;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryPoolMXBean;
-import java.util.LinkedList;
-import java.util.List;
-
-// 6581734 states that memory pool usage via the mbean is wrong
-// for CMS (zero, even after a collection).
-//
-// 6580448 states that the collection count similarly is wrong
-// (stays at zero for CMS collections)
-// -- closed as dup of 6581734 as the same fix resolves both.
-
-
-public class TestMBeanCMS {
-
-    private String poolName = "CMS";
-    private String collectorName = "ConcurrentMarkSweep";
-
-    public static void main(String [] args) {
-
-        TestMBeanCMS t = null;
-        if (args.length==2) {
-            t = new TestMBeanCMS(args[0], args[1]);
-        } else {
-            System.out.println("Defaulting to monitor CMS pool and collector.");
-            t = new TestMBeanCMS();
-        }
-        t.run();
-    }
-
-    public TestMBeanCMS(String pool, String collector) {
-        poolName = pool;
-        collectorName = collector;
-    }
-
-    public TestMBeanCMS() {
-    }
-
-    public void run() {
-        // Use enough memory that we expect collections
-        // to have happened.
-        // Must run with options that avoid a stop-the-world full GC
-        // but trigger at least one CMS cycle.
-        allocationWork(300*1024*1024);
-        System.out.println("Done allocationWork");
-
-        // Verify some non-zero results are stored.
-        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
-        int poolsFound = 0;
-        int poolsWithStats = 0;
-        for (int i=0; i<pools.size(); i++) {
-            MemoryPoolMXBean pool = pools.get(i);
-            String name = pool.getName();
-            System.out.println("found pool: " + name);
-
-            if (name.contains(poolName)) {
-                long usage = pool.getCollectionUsage().getUsed();
-                System.out.println(name + ": usage after GC = " + usage);
-                poolsFound++;
-                if (usage > 0) {
-                    poolsWithStats++;
-                }
-            }
-        }
-        if (poolsFound == 0) {
-            throw new RuntimeException("No matching memory pools found: test with -XX:+UseConcMarkSweepGC");
-        }
-
-        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
-        int collectorsFound = 0;
-        int collectorsWithTime= 0;
-        for (int i=0; i<collectors.size(); i++) {
-            GarbageCollectorMXBean collector = collectors.get(i);
-            String name = collector.getName();
-            System.out.println("found collector: " + name);
-            if (name.contains(collectorName)) {
-                collectorsFound++;
-                System.out.println(name + ": collection count = "
-                                   + collector.getCollectionCount());
-                System.out.println(name + ": collection time  = "
-                                   + collector.getCollectionTime());
-                if (collector.getCollectionCount() <= 0) {
-                    throw new RuntimeException("collection count <= 0");
-                }
-                if (collector.getCollectionTime() > 0) {
-                    collectorsWithTime++;
-                }
-            }
-        }
-        // verify:
-        if (poolsWithStats < poolsFound) {
-            throw new RuntimeException("pools found with zero stats");
-        }
-
-        if (collectorsWithTime<collectorsFound) {
-            throw new RuntimeException("collectors found with zero time");
-        }
-        System.out.println("Test passed.");
-    }
-
-    public void allocationWork(long target) {
-
-        long sizeAllocated = 0;
-        List<byte[]> list = new LinkedList<>();
-        long delay = 50;
-        long count = 0;
-
-        while (sizeAllocated < target) {
-            int size = 1024*1024;
-            byte [] alloc = new byte[size];
-            if (count % 2 == 0) {
-                list.add(alloc);
-                sizeAllocated+=size;
-                System.out.print(".");
-            }
-            try { Thread.sleep(delay); } catch (InterruptedException ie) { }
-            count++;
-        }
-    }
-
-}
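The bug this test guarded against (6581734) was a pool whose collection usage stayed at zero even after a collection had run. The distinction it probes is still useful: unlike getUsage(), getCollectionUsage() reports occupancy as of the end of the last GC, and returns null for pools that are not collected. A minimal sketch:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;
import java.lang.management.MemoryUsage;

public class PoolUsageAfterGC {
    public static void main(String[] args) {
        System.gc();  // give every collected pool a post-GC snapshot
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            MemoryUsage usage = pool.getCollectionUsage();  // null if the pool is not collected
            System.out.printf("%-30s %s%n", pool.getName(),
                    usage == null ? "n/a" : usage.getUsed() + " bytes after GC");
        }
    }
}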
--- a/test/hotspot/jtreg/gc/concurrent_phase_control/TestConcurrentPhaseControlCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.concurrent_phase_control;
-
-/*
- * @test TestConcurrentPhaseControlCMS
- * @bug 8169517
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Verify CMS GC doesn't support WhiteBox concurrent phase control.
- * @key gc
- * @modules java.base
- * @library /test/lib /
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- *    sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run main/othervm -XX:+UseConcMarkSweepGC
- *   -Xbootclasspath/a:.
- *   -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
- *   gc.concurrent_phase_control.TestConcurrentPhaseControlCMS
- */
-
-import gc.concurrent_phase_control.CheckUnsupported;
-
-public class TestConcurrentPhaseControlCMS {
-
-    public static void main(String[] args) throws Exception {
-        CheckUnsupported.check("CMS");
-    }
-}
--- a/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/ergonomics/TestDynamicNumberOfGCThreads.java	Thu Nov 14 13:50:03 2019 +0000
@@ -44,11 +44,6 @@
   public static void main(String[] args) throws Exception {
     boolean noneGCSupported = true;
 
-    if (GC.ConcMarkSweep.isSupported()) {
-      noneGCSupported = false;
-      testDynamicNumberOfGCThreads("UseConcMarkSweepGC");
-    }
-
     if (GC.G1.isSupported()) {
       noneGCSupported = false;
       testDynamicNumberOfGCThreads("UseG1GC");
@@ -65,7 +60,7 @@
     }
 
     if (noneGCSupported) {
-      throw new SkippedException("Skipping test because none of ConcMarkSweep/G1/Parallel/Shenandoah is supported.");
+      throw new SkippedException("Skipping test because none of G1/Parallel/Shenandoah is supported.");
     }
   }
 
--- a/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/ergonomics/TestInitialGCThreadLogging.java	Thu Nov 14 13:50:03 2019 +0000
@@ -44,11 +44,6 @@
   public static void main(String[] args) throws Exception {
     boolean noneGCSupported = true;
 
-    if (GC.ConcMarkSweep.isSupported()) {
-      noneGCSupported = false;
-      testInitialGCThreadLogging("UseConcMarkSweepGC", "GC Thread");
-    }
-
     if (GC.G1.isSupported()) {
       noneGCSupported = false;
       testInitialGCThreadLogging("UseG1GC", "GC Thread");
@@ -65,7 +60,7 @@
     }
 
     if (noneGCSupported) {
-      throw new SkippedException("Skipping test because none of ConcMarkSweep/G1/Parallel/Shenandoah is supported.");
+      throw new SkippedException("Skipping test because none of G1/Parallel/Shenandoah is supported.");
     }
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/gc/g1/numa/TestG1NUMATouchRegions.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package gc.g1;
+
+/**
+ * @test TestG1NUMATouchRegions
+ * @summary Ensure the bottoms of the given heap regions are properly touched with the requested NUMA id.
+ * @key gc
+ * @requires vm.gc.G1
+ * @requires os.family == "linux"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @build sun.hotspot.WhiteBox
+ * @run driver ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -XX:+UseG1GC -Xbootclasspath/a:. -XX:+UseNUMA -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI gc.g1.TestG1NUMATouchRegions
+ */
+
+import java.util.LinkedList;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+import sun.hotspot.WhiteBox;
+
+public class TestG1NUMATouchRegions {
+    enum NUMASupportStatus {
+        NOT_CHECKED,
+        SUPPORT,
+        NOT_SUPPORT
+    };
+
+    static int G1HeapRegionSize1MB = 1;
+    static int G1HeapRegionSize8MB = 8;
+
+    static NUMASupportStatus status = NUMASupportStatus.NOT_CHECKED;
+
+    public static void main(String[] args) throws Exception {
+        // 1. Page size < G1HeapRegionSize
+        //    Test default page with 1MB heap region size
+        testMemoryTouch("-XX:-UseLargePages", G1HeapRegionSize1MB);
+        // 2. Page size > G1HeapRegionSize
+        //    Test large page with 1MB heap region size.
+        testMemoryTouch("-XX:+UseLargePages", G1HeapRegionSize1MB);
+        // 3. Page size < G1HeapRegionSize
+        //    Test large page with 8MB heap region size.
+        testMemoryTouch("-XX:+UseLargePages", G1HeapRegionSize8MB);
+    }
+
+    // On Linux, UseNUMA is enabled by default if there are multiple active NUMA nodes.
+    static NUMASupportStatus checkNUMAIsEnabled(OutputAnalyzer output) {
+        boolean supportNUMA = Boolean.parseBoolean(output.firstMatch("\\bUseNUMA\\b.*?=.*?([a-z]+)", 1));
+        System.out.println("supportNUMA=" + supportNUMA);
+        return supportNUMA ? NUMASupportStatus.SUPPORT : NUMASupportStatus.NOT_SUPPORT;
+    }
+
+    static long parseSizeString(String size) {
+        long multiplier = 1;
+
+        if (size.endsWith("B")) {
+            multiplier = 1;
+        } else if (size.endsWith("K")) {
+            multiplier = 1024;
+        } else if (size.endsWith("M")) {
+            multiplier = 1024 * 1024;
+        } else if (size.endsWith("G")) {
+            multiplier = 1024 * 1024 * 1024;
+        } else {
+            throw new IllegalArgumentException("Expected memory string '" + size + "'to end with either of: B, K, M, G");
+        }
+
+        long longSize = Long.parseUnsignedLong(size.substring(0, size.length() - 1));
+
+        return longSize * multiplier;
+    }
+
+    static long heapPageSize(OutputAnalyzer output) {
+        String HeapPageSizePattern = "Heap:  .*page_size=([^ ]+)";
+        String str = output.firstMatch(HeapPageSizePattern, 1);
+
+        if (str == null) {
+            output.reportDiagnosticSummary();
+            throw new RuntimeException("Match from '" + HeapPageSizePattern + "' got 'null'");
+        }
+
+        return parseSizeString(str);
+    }
+
+    // 1. -UseLargePages: default page, page size < G1HeapRegionSize
+    //    +UseLargePages: large page size <= G1HeapRegionSize
+    //
+    //    Each 'int' represents the numa id of a single HeapRegion (bottom page).
+    //    e.g. 1MB heap region, 4KB page size on a system with 2 NUMA nodes
+    //         Check the first set (2 regions)
+    //         0| ...omitted..| 0
+    //         1| ...omitted..| 1
+    static void checkCase1Pattern(OutputAnalyzer output, int index, long g1HeapRegionSize, long actualPageSize, int[] memoryNodeIds) throws Exception {
+        StringBuilder sb = new StringBuilder();
+
+        // Append index which means heap region index.
+        sb.append(String.format("%6d", index));
+        sb.append("| .* | ");
+
+        // Append page node id.
+        sb.append(memoryNodeIds[index]);
+
+        output.shouldMatch(sb.toString());
+    }
+
+    // 2. +UseLargePages: large page size > G1HeapRegionSize
+    //
+    //    As an OS page consists of multiple heap regions, the log should also
+    //    be printed multiple times with the same numa id.
+    //    e.g. 1MB heap region, 2MB page size on a system with 2 NUMA nodes
+    //         Check the first set (4 regions)
+    //         0| ...omitted..| 0
+    //         1| ...omitted..| 0
+    //         2| ...omitted..| 1
+    //         3| ...omitted..| 1
+    static void checkCase2Pattern(OutputAnalyzer output, int index, long g1HeapRegionSize, long actualPageSize, int[] memoryNodeIds) throws Exception {
+        StringBuilder sb = new StringBuilder();
+
+        // Append page range.
+        int lines_to_print = (int)(actualPageSize / g1HeapRegionSize);
+        for (int i = 0; i < lines_to_print; i++) {
+            // Append index which means heap region index.
+            sb.append(String.format("%6d", index * lines_to_print + i));
+            sb.append("| .* | ");
+
+            // Append page node id.
+            sb.append(memoryNodeIds[index]);
+
+            output.shouldMatch(sb.toString());
+            sb.setLength(0);
+        }
+    }
+
+    static void checkNUMALog(OutputAnalyzer output, int regionSizeInMB) throws Exception {
+        WhiteBox wb = WhiteBox.getWhiteBox();
+        long g1HeapRegionSize = regionSizeInMB * 1024 * 1024;
+        long actualPageSize = heapPageSize(output);
+        long defaultPageSize = (long)wb.getVMPageSize();
+        int memoryNodeCount = wb.g1ActiveMemoryNodeCount();
+        int[] memoryNodeIds = wb.g1MemoryNodeIds();
+
+        System.out.println("node count=" + memoryNodeCount + ", actualPageSize=" + actualPageSize);
+        // Check for the first set of active numa nodes.
+        for (int index = 0; index < memoryNodeCount; index++) {
+            if (actualPageSize <= defaultPageSize) {
+                checkCase1Pattern(output, index, g1HeapRegionSize, actualPageSize, memoryNodeIds);
+            } else {
+                checkCase2Pattern(output, index, g1HeapRegionSize, actualPageSize, memoryNodeIds);
+            }
+        }
+    }
+
+    static void testMemoryTouch(String largePagesSetting, int regionSizeInMB) throws Exception {
+        // Skip testing with message.
+        if (status == NUMASupportStatus.NOT_SUPPORT) {
+            System.out.println("NUMA is not supported");
+            return;
+        }
+
+        ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(
+                                              "-Xbootclasspath/a:.",
+                                              "-Xlog:pagesize,gc+heap+region=trace",
+                                              "-XX:+UseG1GC",
+                                              "-Xmx128m",
+                                              "-Xms128m",
+                                              "-XX:+UnlockDiagnosticVMOptions",
+                                              "-XX:+WhiteBoxAPI",
+                                              "-XX:+PrintFlagsFinal",
+                                              "-XX:+UseNUMA",
+                                              "-XX:+AlwaysPreTouch",
+                                              largePagesSetting,
+                                              "-XX:G1HeapRegionSize=" + regionSizeInMB + "m",
+                                              GCTest.class.getName());
+        OutputAnalyzer output = new OutputAnalyzer(pb_enabled.start());
+
+        // Check NUMA availability.
+        if (status == NUMASupportStatus.NOT_CHECKED) {
+            status = checkNUMAIsEnabled(output);
+        }
+
+        if (status == NUMASupportStatus.SUPPORT) {
+            checkNUMALog(output, regionSizeInMB);
+        } else {
+            // Exit with message for the first test.
+            System.out.println("NUMA is not supported");
+        }
+    }
+
+  static class GCTest {
+    public static final int M = 1024*1024;
+    public static LinkedList<Object> garbageList = new LinkedList<Object>();
+    // A large object referenced by a static.
+    static int[] filler = new int[10 * M];
+
+    public static void genGarbage() {
+      for (int i = 0; i < 32*1024; i++) {
+        garbageList.add(new int[100]);
+      }
+      garbageList.clear();
+    }
+
+    public static void main(String[] args) {
+
+      int[] large = new int[M];
+      Object ref = large;
+
+      System.out.println("Creating garbage");
+      for (int i = 0; i < 100; i++) {
+        // A large object that will be reclaimed eagerly.
+        large = new int[6*M];
+        genGarbage();
+        // Make sure that the compiler cannot completely remove
+        // the allocation of the large object until here.
+        System.out.println(large);
+      }
+
+      // Keep the reference to the first object alive.
+      System.out.println(ref);
+      System.out.println("Done");
+    }
+  }
+}
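The index arithmetic behind checkCase2Pattern is easy to get wrong, so a worked example may help: when the OS page is larger than the heap region, pageSize / regionSize consecutive regions share one page and therefore one numa id. A self-contained sketch with illustrative sizes and node ids (not values read from a real system):

public class RegionsPerPage {
    public static void main(String[] args) {
        long regionSize = 1L * 1024 * 1024;  // 1MB G1 region
        long pageSize   = 2L * 1024 * 1024;  // 2MB large page
        int regionsPerPage = (int) (pageSize / regionSize);
        int[] nodeOfPage = {0, 1};           // assumed ids of two NUMA nodes
        for (int page = 0; page < nodeOfPage.length; page++) {
            for (int i = 0; i < regionsPerPage; i++) {
                // Matches the table in the comment: regions 0,1 -> node 0; 2,3 -> node 1.
                System.out.printf("region %d -> node %d%n",
                        page * regionsPerPage + i, nodeOfPage[page]);
            }
        }
    }
}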
--- a/test/hotspot/jtreg/gc/logging/TestGCId.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/logging/TestGCId.java	Thu Nov 14 13:50:03 2019 +0000
@@ -53,10 +53,6 @@
       noneGCSupported = false;
       testGCId("UseG1GC");
     }
-    if (GC.ConcMarkSweep.isSupported()) {
-      noneGCSupported = false;
-      testGCId("UseConcMarkSweepGC");
-    }
     if (GC.Serial.isSupported()) {
       noneGCSupported = false;
       testGCId("UseSerialGC");
@@ -67,7 +63,7 @@
     }
 
     if (noneGCSupported) {
-      throw new SkippedException("Skipping test because none of Parallel/G1/ConcMarkSweep/Serial/Shenandoah is supported.");
+      throw new SkippedException("Skipping test because none of Parallel/G1/Serial/Shenandoah is supported.");
     }
   }
 
--- a/test/hotspot/jtreg/gc/metaspace/TestMetaspaceCMSCancel.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.metaspace;
-import jdk.test.lib.process.ProcessTools;
-import jdk.test.lib.process.OutputAnalyzer;
-import jdk.test.lib.Asserts;
-import sun.hotspot.WhiteBox;
-
-/* @test TestMetaspaceCMSCancel
- * @bug 8026752
- * @summary Tests cancel of CMS concurrent cycle for Metaspace after a full GC
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- * @build sun.hotspot.WhiteBox
- * @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm gc.metaspace.TestMetaspaceCMSCancel
- */
-
-
-public class TestMetaspaceCMSCancel {
-
-    public static void main(String[] args) throws Exception {
-        // Set a small MetaspaceSize so that a CMS concurrent collection will be
-        // scheduled.  Set CMSWaitDuration to 5s so that the concurrent collection
-        // start may be delayed.  It does not guarantee 5s before the start of the
-        // concurrent collection but does increase the probability that it will
-        // be started later.  System.gc() is used to invoke a full collection.  Set
-        // ExplicitGCInvokesConcurrent to off so it is a STW collection.
-        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xbootclasspath/a:.",
-                                                                  "-XX:+UnlockDiagnosticVMOptions",
-                                                                  "-XX:+WhiteBoxAPI",
-                                                                  "-XX:+UseConcMarkSweepGC",
-                                                                  "-XX:MetaspaceSize=2m",
-                                                                  "-XX:CMSWaitDuration=5000",
-                                                                  "-XX:-ExplicitGCInvokesConcurrent",
-                                                                  "-Xlog:gc*=debug",
-                                                                  MetaspaceGCTest.class.getName());
-
-        OutputAnalyzer output = new OutputAnalyzer(pb.start());
-        output.shouldNotContain("Concurrent Reset");
-        output.shouldHaveExitValue(0);
-    }
-
-    static class MetaspaceGCTest {
-        public static void main(String [] args) {
-            WhiteBox wb = WhiteBox.getWhiteBox();
-            System.gc();
-            Asserts.assertFalse(wb.metaspaceShouldConcurrentCollect());
-        }
-    }
-}
--- a/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/metaspace/TestSizeTransitions.java	Thu Nov 14 13:50:03 2019 +0000
@@ -58,15 +58,6 @@
  * @run driver gc.metaspace.TestSizeTransitions true  -XX:+UseG1GC
  */
 
-/* @test TestSizeTransitionsCMS
- * @key gc
- * @requires vm.gc.ConcMarkSweep
- * @summary Tests that the metaspace size transition logging is done correctly.
- * @library /test/lib
- * @run driver gc.metaspace.TestSizeTransitions false -XX:+UseConcMarkSweepGC
- * @run driver gc.metaspace.TestSizeTransitions true  -XX:+UseConcMarkSweepGC
- */
-
 public class TestSizeTransitions {
   public static class Run {
     public static void main(String... args) throws Exception {
--- a/test/hotspot/jtreg/gc/startup_warnings/TestCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package gc.startup_warnings;
-
-/*
- * @test TestCMS
- * @key gc
- * @bug 8006398 8155948 8179013
- * @summary Test that CMS prints a warning message
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @library /test/lib
- * @modules java.base/jdk.internal.misc
- *          java.management
- * @run main gc.startup_warnings.TestCMS
- */
-
-import jdk.test.lib.process.ProcessTools;
-import jdk.test.lib.process.OutputAnalyzer;
-
-public class TestCMS {
-
-  public static void runTest(String[] args) throws Exception {
-    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
-    OutputAnalyzer output = new OutputAnalyzer(pb.start());
-    output.shouldContain("deprecated");
-    output.shouldNotContain("error");
-    output.shouldHaveExitValue(0);
-  }
-
-  public static void main(String args[]) throws Exception {
-    runTest(new String[] {"-XX:+UseConcMarkSweepGC", "-version"});
-    runTest(new String[] {"-Xconcgc", "-version"});
-    runTest(new String[] {"-Xnoconcgc", "-version"});
-  }
-
-}
--- a/test/hotspot/jtreg/gc/stress/TestReclaimStringsLeaksMemory.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/stress/TestReclaimStringsLeaksMemory.java	Thu Nov 14 13:50:03 2019 +0000
@@ -35,7 +35,6 @@
  * @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseSerialGC
  * @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseParallelGC
  * @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseParallelGC -XX:-UseParallelOldGC
- * @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseConcMarkSweepGC
  * @run main/othervm gc.stress.TestReclaimStringsLeaksMemory -XX:+UseG1GC
  */
 
--- a/test/hotspot/jtreg/gc/stress/gcbasher/TestGCBasherWithCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.gcbasher;
-
-import java.io.IOException;
-
-/*
- * @test TestGCBasherWithCMS
- * @key gc stress
- * @library /
- * @requires vm.gc.ConcMarkSweep
- * @requires vm.flavor == "server" & !vm.emulatedClient & !vm.graal.enabled
- * @summary Stress the CMS GC by trying to make old objects more likely to be garbage than young objects.
- * @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx256m -server -XX:+UseConcMarkSweepGC gc.stress.gcbasher.TestGCBasherWithCMS 120000
- */
-public class TestGCBasherWithCMS {
-    public static void main(String[] args) throws IOException {
-        TestGCBasher.main(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/stress/gclocker/TestGCLockerWithCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.gclocker;
-
-/*
- * @test TestGCLockerWithCMS
- * @key gc
- * @library /
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Stress CMS' GC locker by calling GetPrimitiveArrayCritical while concurrently filling up old gen.
- * @run main/native/othervm/timeout=200 -Xlog:gc*=info -Xms1500m -Xmx1500m -XX:+UseConcMarkSweepGC gc.stress.gclocker.TestGCLockerWithCMS
- */
-public class TestGCLockerWithCMS {
-    public static void main(String[] args) {
-        String[] testArgs = {"2", "CMS Old Gen"};
-        TestGCLocker.main(testArgs);
-    }
-}
--- a/test/hotspot/jtreg/gc/stress/gcold/TestGCOldWithCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.gcold;
-
-/*
- * @test TestGCOldWithCMS
- * @key gc
- * @library /
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Stress the CMS GC by trying to make old objects more likely to be garbage than young objects.
- * @run main/othervm -Xmx384M -XX:+UseConcMarkSweepGC gc.stress.gcold.TestGCOldWithCMS 50 1 20 10 10000
- */
-public class TestGCOldWithCMS {
-    public static void main(String[] args) {
-        TestGCOld.main(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx64m jdk.jfr.event.gc.detailed.TestStressAllocationGCEventsWithCMS
- */
-public class TestStressAllocationGCEventsWithCMS {
-
-    public static void main(String[] args) throws Exception {
-        new StressAllocationGCEvents().run(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressAllocationGCEventsWithParNew.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires vm.gc == "null" & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx64m jdk.jfr.event.gc.detailed.TestStressAllocationGCEventsWithParNew
- */
-public class TestStressAllocationGCEventsWithParNew {
-
-    public static void main(String[] args) throws Exception {
-        new StressAllocationGCEvents().run(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx256m jdk.jfr.event.gc.detailed.TestStressBigAllocationGCEventsWithCMS 1048576
- */
-public class TestStressBigAllocationGCEventsWithCMS {
-
-    public static void main(String[] args) throws Exception {
-        new StressAllocationGCEvents().run(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/stress/jfr/TestStressBigAllocationGCEventsWithParNew.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @requires vm.hasJFR
- * @requires vm.gc == "null" & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xmx256m jdk.jfr.event.gc.detailed.TestStressBigAllocationGCEventsWithParNew 1048576
- */
-public class TestStressBigAllocationGCEventsWithParNew {
-
-    public static void main(String[] args) throws Exception {
-        new StressAllocationGCEvents().run(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/stress/systemgc/TestSystemGCWithCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package gc.stress.systemgc;
-
-/*
- * @test TestSystemGCWithCMS
- * @key gc stress
- * @bug 8190703
- * @library /
- * @requires vm.gc.ConcMarkSweep & !vm.graal.enabled
- * @summary Stress the CMS GC full GC by allocating objects of different lifetimes concurrently with System.gc().
- * @run main/othervm/timeout=300 -Xlog:gc*=info -Xmx512m -XX:+UseConcMarkSweepGC gc.stress.systemgc.TestSystemGCWithCMS 270
- */
-public class TestSystemGCWithCMS {
-    public static void main(String[] args) throws Exception {
-        TestSystemGC.main(args);
-    }
-}
--- a/test/hotspot/jtreg/gc/survivorAlignment/SurvivorAlignmentTestMain.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/gc/survivorAlignment/SurvivorAlignmentTestMain.java	Thu Nov 14 13:50:03 2019 +0000
@@ -71,7 +71,6 @@
     private static final String G1_EDEN = "G1 Eden Space";
     private static final String G1_SURVIVOR = "G1 Survivor Space";
     private static final String SERIAL_TENURED = "Tenured Gen";
-    private static final String CMS_TENURED = "CMS Old Gen";
     private static final String PS_TENURED = "PS Old Gen";
     private static final String G1_TENURED = "G1 Old Gen";
 
@@ -79,14 +78,6 @@
             SurvivorAlignmentTestMain.WHITE_BOX.getUintxVMFlag(
                     "G1HeapRegionSize")).orElse(-1L);
 
-    /**
-     * Min size of free chunk in CMS generation.
-     * An object allocated in CMS generation will at least occupy this amount
-     * of bytes.
-     */
-    private static final long CMS_MIN_FREE_CHUNK_SIZE
-            = 3L * Unsafe.ADDRESS_SIZE;
-
     private static final AlignmentHelper EDEN_SPACE_HELPER;
     private static final AlignmentHelper SURVIVOR_SPACE_HELPER;
     private static final AlignmentHelper TENURED_SPACE_HELPER;
@@ -125,11 +116,6 @@
      * alignment in other spaces is expected to be equal to
      * {@code ObjectAlignmentInBytes} value.
      *
-     * In CMS generation we can't allocate less then {@code MinFreeChunk} value,
-     * for other CGs we expect that object of size {@code MIN_OBJECT_SIZE}
-     * could be allocated as it is (of course, its size could be aligned
-     * according to alignment value used in a particular space).
-     *
      * For G1 GC MXBeans could report memory usage only with region size
      * precision (if an object allocated in some G1 heap region, then all region
      * will claimed as used), so for G1's spaces precision is equal to
@@ -187,15 +173,6 @@
                             AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
                             AlignmentHelper.MIN_OBJECT_SIZE, pool);
                     break;
-                case SurvivorAlignmentTestMain.CMS_TENURED:
-                    Asserts.assertNull(tenuredHelper,
-                            "Only one bean for tenured space is expected.");
-                    tenuredHelper = new AlignmentHelper(
-                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
-                            AlignmentHelper.OBJECT_ALIGNMENT_IN_BYTES,
-                            SurvivorAlignmentTestMain.CMS_MIN_FREE_CHUNK_SIZE,
-                            pool);
-                    break;
             }
         }
         EDEN_SPACE_HELPER = Objects.requireNonNull(edenHelper,
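
For reference, the pool-name strings matched above come straight from the platform MemoryPoolMXBeans. A minimal stand-alone sketch (class name hypothetical, not part of this change) that prints them:

import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;

public class PoolNamesSketch {
    public static void main(String[] args) {
        // Prints names such as "G1 Eden Space", "G1 Survivor Space", or "Tenured Gen",
        // the same strings the helper-selection switch above compares against.
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
            System.out.println(pool.getName() + " (" + pool.getType() + ")");
        }
    }
}
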
--- a/test/hotspot/jtreg/runtime/7167069/PrintAsFlag.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/7167069/PrintAsFlag.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
  *
  * @test PrintAsFlag
  * @summary verify that Flag::print_as_flag() works correctly. This is used by "jinfo -flag" and -XX:+PrintCommandLineFlags.
- * @run main/othervm -XX:+PrintCommandLineFlags -XX:-ShowMessageBoxOnError -XX:BiasedLockingStartupDelay=4000 -XX:ParallelGCThreads=4 -XX:MaxRAM=1G -XX:CMSSmallCoalSurplusPercent=1.05 -XX:ErrorFile="file" PrintAsFlag
+ * @run main/othervm -XX:+PrintCommandLineFlags -XX:-ShowMessageBoxOnError -XX:BiasedLockingStartupDelay=4000 -XX:ParallelGCThreads=4 -XX:MaxRAM=1G -XX:ErrorFile="file" PrintAsFlag
  */
 
 public class PrintAsFlag {
--- a/test/hotspot/jtreg/runtime/CheckUnhandledOops/TestVerifyOops.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/CheckUnhandledOops/TestVerifyOops.java	Thu Nov 14 13:50:03 2019 +0000
@@ -21,10 +21,19 @@
  * questions.
  */
 
+// The test fails on sparc because there are errors in VerifyOops.
 /*
  * @test
  * @bug 8231058
- * @requires vm.debug & (os.arch != "sparc") & (os.arch != "sparcv9")
+ * @requires vm.debug & vm.bits == "64"
+ * @requires (os.arch != "sparcv9")
+ * @run main/othervm -XX:+VerifyOops -XX:+UseCompressedOops TestVerifyOops
+ * @run main/othervm -XX:+VerifyOops -XX:-UseCompressedOops TestVerifyOops
+ */
+/*
+ * @test
+ * @bug 8231058
+ * @requires vm.debug & vm.bits == "32"
  * @run main/othervm -XX:+VerifyOops TestVerifyOops
  */
 
--- a/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java	Thu Nov 14 13:50:03 2019 +0000
@@ -219,11 +219,6 @@
         excludeTestMinRange("MallocMaxTestWords");
 
         /*
-         * Exclude CMSSamplingGrain as it can cause intermittent failures on Windows
-         */
-        excludeTestRange("CMSSamplingGrain");
-
-        /*
          * Exclude below options as their maximum value would consume too much memory
          * and would affect other tests that run in parallel.
          */
--- a/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOption.java	Thu Nov 14 13:50:03 2019 +0000
@@ -400,8 +400,7 @@
         }
 
         if (GCType != null &&
-            !(prepend.contains("-XX:+UseConcMarkSweepGC") ||
-              prepend.contains("-XX:+UseSerialGC") ||
+            !(prepend.contains("-XX:+UseSerialGC") ||
               prepend.contains("-XX:+UseParallelGC") ||
               prepend.contains("-XX:+UseG1GC"))) {
             explicitGC = GCType;
--- a/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/common/optionsvalidation/JVMOptionsUtils.java	Thu Nov 14 13:50:03 2019 +0000
@@ -79,9 +79,6 @@
 
         for (GarbageCollectorMXBean gcMxBean : gcMxBeans) {
             switch (gcMxBean.getName()) {
-                case "ConcurrentMarkSweep":
-                    GCType = "-XX:+UseConcMarkSweepGC";
-                    break;
                 case "MarkSweepCompact":
                     GCType = "-XX:+UseSerialGC";
                     break;
@@ -186,10 +183,6 @@
             option.addPrepend("-XX:+UseG1GC");
         }
 
-        if (name.startsWith("CMS")) {
-            option.addPrepend("-XX:+UseConcMarkSweepGC");
-        }
-
         if (name.startsWith("NUMA")) {
             option.addPrepend("-XX:+UseNUMA");
         }
@@ -207,18 +200,6 @@
             case "MaxMetaspaceFreeRatio":
                 option.addPrepend("-XX:MinMetaspaceFreeRatio=0");
                 break;
-            case "CMSOldPLABMin":
-                option.addPrepend("-XX:CMSOldPLABMax=" + option.getMax());
-                break;
-            case "CMSOldPLABMax":
-                option.addPrepend("-XX:CMSOldPLABMin=" + option.getMin());
-                break;
-            case "CMSPrecleanNumerator":
-                option.addPrepend("-XX:CMSPrecleanDenominator=" + option.getMax());
-                break;
-            case "CMSPrecleanDenominator":
-                option.addPrepend("-XX:CMSPrecleanNumerator=" + ((new Integer(option.getMin())) - 1));
-                break;
             case "G1RefProcDrainInterval":
                 option.addPrepend("-XX:+ExplicitGCInvokesConcurrent");
                 break;
@@ -228,9 +209,6 @@
             case "NUMAInterleaveGranularity":
                 option.addPrepend("-XX:+UseNUMAInterleaving");
                 break;
-            case "CPUForCMSThread":
-                option.addPrepend("-XX:+BindCMSThreadToCPU");
-                break;
             case "VerifyGCStartAt":
                 option.addPrepend("-XX:+VerifyBeforeGC");
                 option.addPrepend("-XX:+VerifyAfterGC");
--- a/test/hotspot/jtreg/runtime/CommandLine/TestNullTerminatedFlags.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/CommandLine/TestNullTerminatedFlags.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,6 @@
 public class TestNullTerminatedFlags {
    public static String[] options = {
             "-Xnoclassgc",
-            "-Xconcgc",
-            "-Xnoconcgc",
             "-Xbatch",
             "-green",
             "-native",
--- a/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/CompressedOops/UseCompressedOops.java	Thu Nov 14 13:50:03 2019 +0000
@@ -58,9 +58,6 @@
         testCompressedOopsModes(args);
         // Test GCs.
         testCompressedOopsModes(args, "-XX:+UseG1GC");
-        if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
-            testCompressedOopsModes(args, "-XX:+UseConcMarkSweepGC");
-        }
         testCompressedOopsModes(args, "-XX:+UseSerialGC");
         testCompressedOopsModes(args, "-XX:+UseParallelGC");
         testCompressedOopsModes(args, "-XX:+UseParallelOldGC");
--- a/test/hotspot/jtreg/runtime/cds/SpaceUtilizationCheck.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/cds/SpaceUtilizationCheck.java	Thu Nov 14 13:50:03 2019 +0000
@@ -73,8 +73,8 @@
                 Matcher matcher = pattern.matcher(line);
                 if (matcher.find()) {
                     String name = matcher.group(1);
-                    if (name.equals("s0") || name.equals("s1")) {
-                      // String regions are listed at the end and they may not be fully occupied.
+                    if (name.equals("bm")) {
+                      // Bitmap space does not have a requested address.
                       break;
                     } else {
                       System.out.println("Checking " + name + " in : " + line);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/ArchiveRelocationTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @comment the test uses -XX:ArchiveRelocationMode=1 to force relocation.
+ * @requires vm.cds
+ * @summary Testing relocation of CDS archive (during both dump time and run time)
+ * @comment JDK-8231610 Relocate the CDS archive if it cannot be mapped to the requested address
+ * @bug 8231610
+ * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds/test-classes
+ * @build Hello
+ * @run driver ClassFileInstaller -jar hello.jar Hello
+ * @run driver ArchiveRelocationTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jtreg.SkippedException;
+
+public class ArchiveRelocationTest {
+    public static void main(String... args) throws Exception {
+        try {
+            test(true,  false);
+            test(false, true);
+            test(true,  true);
+        } catch (SkippedException s) {
+            s.printStackTrace();
+            throw new RuntimeException("Archive mapping should always succeed after JDK-8231610 (did the machine run out of memory?)");
+        }
+    }
+
+    static int caseCount = 0;
+
+    // dump_reloc - force relocation of archive during dump time?
+    // run_reloc  - force relocation of archive during run time?
+    static void test(boolean dump_reloc, boolean run_reloc) throws Exception {
+        caseCount += 1;
+        System.out.println("============================================================");
+        System.out.println("case = " + caseCount + ", dump = " + dump_reloc
+                           + ", run = " + run_reloc);
+        System.out.println("============================================================");
+
+        String appJar = ClassFileInstaller.getJarPath("hello.jar");
+        String mainClass = "Hello";
+        String forceRelocation = "-XX:ArchiveRelocationMode=1";
+        String dumpRelocArg = dump_reloc ? forceRelocation : "-showversion";
+        String runRelocArg  = run_reloc  ? forceRelocation : "-showversion";
+        String logArg = "-Xlog:cds=debug,cds+reloc=debug";
+        String unlockArg = "-XX:+UnlockDiagnosticVMOptions";
+
+        OutputAnalyzer out = TestCommon.dump(appJar,
+                                             TestCommon.list(mainClass),
+                                             unlockArg, dumpRelocArg, logArg);
+        if (dump_reloc) {
+            out.shouldContain("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
+            out.shouldContain("Relocating archive from");
+        }
+
+        TestCommon.run("-cp", appJar, unlockArg, runRelocArg, logArg,  mainClass)
+            .assertNormalExit(output -> {
+                    if (run_reloc) {
+                        output.shouldContain("ArchiveRelocationMode == 1: always map archive(s) at an alternative address");
+                        output.shouldContain("runtime archive relocation start");
+                        output.shouldContain("runtime archive relocation done");
+                    }
+                });
+    }
+}
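
Outside jtreg, the dump half of this test can be approximated with a plain ProcessBuilder. A sketch under the assumptions that java is on the PATH and the JDK's default classlist is used (class name hypothetical):

import java.io.IOException;

public class ForceRelocDumpSketch {
    public static void main(String[] args) throws IOException, InterruptedException {
        // ArchiveRelocationMode is a diagnostic flag, hence the unlock option;
        // mode 1 forces the archive to be written at an alternative address.
        Process p = new ProcessBuilder(
                "java",
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:ArchiveRelocationMode=1",
                "-Xlog:cds=debug,cds+reloc=debug",
                "-Xshare:dump")
            .inheritIO()
            .start();
        System.exit(p.waitFor());
    }
}
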
--- a/test/hotspot/jtreg/runtime/cds/appcds/CommandLineFlagCombo.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/CommandLineFlagCombo.java	Thu Nov 14 13:50:03 2019 +0000
@@ -50,7 +50,7 @@
 
     // shared base address test table
     private static final String[] testTable = {
-        "-XX:+UseG1GC", "-XX:+UseSerialGC", "-XX:+UseParallelGC", "-XX:+UseConcMarkSweepGC",
+        "-XX:+UseG1GC", "-XX:+UseSerialGC", "-XX:+UseParallelGC",
         "-XX:+FlightRecorder",
         "-XX:+UseLargePages", // may only take effect on machines with large-pages
         "-XX:+UseCompressedClassPointers",
@@ -123,18 +123,11 @@
             }
         }
 
-        if (Compiler.isGraalEnabled() && testEntry.equals("-XX:+UseConcMarkSweepGC"))
-        {
-            System.out.println("Graal does not support CMS");
-            return true;
-        }
-
         if (!WhiteBox.getWhiteBox().isJFRIncludedInVmBuild() && testEntry.equals("-XX:+FlightRecorder"))
         {
             System.out.println("JFR does not exist");
             return true;
         }
-
         return false;
     }
 }
--- a/test/hotspot/jtreg/runtime/cds/appcds/TestCommon.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/TestCommon.java	Thu Nov 14 13:50:03 2019 +0000
@@ -343,6 +343,7 @@
             newFile.renameTo(oldFile);
             System.out.println("firstJar = " + firstJar + " Modified");
         } else {
+            zipFile.close();
             System.out.println("firstJar = " + firstJar);
         }
     }
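
The added close() plugs a ZipFile leak on the unmodified-jar path. A sketch of the same fix expressed with try-with-resources, which closes the file on every path (helper and file name hypothetical):

import java.util.zip.ZipFile;

public class ZipCloseSketch {
    static void inspect(String firstJar) throws Exception {
        try (ZipFile zipFile = new ZipFile(firstJar)) {
            System.out.println("firstJar = " + firstJar + ", entries = " + zipFile.size());
        } // zipFile is closed here whether or not an exception was thrown
    }

    public static void main(String[] args) throws Exception {
        inspect(args[0]);
    }
}
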
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveRelocationTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @comment the test uses -XX:ArchiveRelocationMode=1 to force relocation.
+ * @requires vm.cds
+ * @summary Testing relocation of dynamic CDS archive (during both dump time and run time)
+ * @comment JDK-8231610 Relocate the CDS archive if it cannot be mapped to the requested address
+ * @bug 8231610
+ * @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds /test/hotspot/jtreg/runtime/cds/appcds/test-classes
+ * @build Hello
+ * @run driver ClassFileInstaller -jar hello.jar Hello
+ * @run driver DynamicArchiveRelocationTest
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jtreg.SkippedException;
+
+public class DynamicArchiveRelocationTest extends DynamicArchiveTestBase {
+    public static void main(String... args) throws Exception {
+        try {
+            testOuter(false);
+            testOuter(true);
+        } catch (SkippedException s) {
+            s.printStackTrace();
+            throw new RuntimeException("Archive mapping should always succeed after JDK-8231610 (did the machine run out of memory?)");
+        }
+    }
+
+    static void testOuter(boolean dump_base_reloc) throws Exception {
+        testInner(dump_base_reloc, true,  false);
+        testInner(dump_base_reloc, false, true);
+        testInner(dump_base_reloc, true,  true);
+    }
+
+    static boolean dump_base_reloc, dump_top_reloc, run_reloc;
+
+    // dump_base_reloc - force relocation of archive when dumping base archive
+    // dump_top_reloc  - force relocation of archive when dumping top  archive
+    // run_reloc       - force relocation of archive when running
+    static void testInner(boolean dump_base_reloc, boolean dump_top_reloc, boolean run_reloc) throws Exception {
+        DynamicArchiveRelocationTest.dump_base_reloc = dump_base_reloc;
+        DynamicArchiveRelocationTest.dump_top_reloc  = dump_top_reloc;
+        DynamicArchiveRelocationTest.run_reloc       = run_reloc;
+
+        runTest(DynamicArchiveRelocationTest::doTest);
+    }
+
+    static int caseCount = 0;
+    static void doTest() throws Exception {
+        caseCount += 1;
+        System.out.println("============================================================");
+        System.out.println("case = " + caseCount + ", base = " + dump_base_reloc
+                           + ", top = " + dump_top_reloc
+                           + ", run = " + run_reloc);
+        System.out.println("============================================================");
+
+        String appJar = ClassFileInstaller.getJarPath("hello.jar");
+        String mainClass = "Hello";
+        String forceRelocation = "-XX:ArchiveRelocationMode=1";
+        String dumpBaseRelocArg = dump_base_reloc ? forceRelocation : "-showversion";
+        String dumpTopRelocArg  = dump_top_reloc  ? forceRelocation : "-showversion";
+        String runRelocArg      = run_reloc       ? forceRelocation : "-showversion";
+        String logArg = "-Xlog:cds=debug,cds+reloc=debug";
+
+        String baseArchiveName = getNewArchiveName("base");
+        String topArchiveName  = getNewArchiveName("top");
+
+        String runtimeMsg1 = "ArchiveRelocationMode == 1: always map archive(s) at an alternative address";
+        String runtimeMsg2 = "runtime archive relocation start";
+        String runtimeMsg3 = "runtime archive relocation done";
+        String unlockArg = "-XX:+UnlockDiagnosticVMOptions";
+
+        // (1) Dump base archive (static)
+
+        OutputAnalyzer out = dumpBaseArchive(baseArchiveName, unlockArg, dumpBaseRelocArg, logArg);
+        if (dump_base_reloc) {
+            out.shouldContain("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
+            out.shouldContain("Relocating archive from");
+        }
+
+        // (2) Dump top archive (dynamic)
+
+        dump2(baseArchiveName, topArchiveName,
+              unlockArg,
+              dumpTopRelocArg,
+              logArg,
+              "-cp", appJar, mainClass)
+            .assertNormalExit(output -> {
+                    if (dump_top_reloc) {
+                        output.shouldContain(runtimeMsg1);
+                        output.shouldContain(runtimeMsg2);
+                        output.shouldContain(runtimeMsg3);
+                    }
+                });
+
+        run2(baseArchiveName, topArchiveName,
+             unlockArg,
+             runRelocArg,
+             logArg,
+            "-cp", appJar, mainClass)
+            .assertNormalExit(output -> {
+                    if (run_reloc) {
+                        output.shouldContain(runtimeMsg1);
+                        output.shouldContain(runtimeMsg2);
+                        output.shouldContain(runtimeMsg3);
+                    }
+                });
+    }
+}
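
The dump2/run2 helpers wrap the standard dynamic-CDS flags. As a rough stand-alone equivalent of the base-plus-top flow (a sketch; app.jar and the Hello main class are assumptions, and the base archive here is the JDK default):

import java.io.IOException;

public class DynamicDumpSketch {
    static int run(String... cmd) throws IOException, InterruptedException {
        return new ProcessBuilder(cmd).inheritIO().start().waitFor();
    }

    public static void main(String[] args) throws IOException, InterruptedException {
        // (1) Run the app once, dumping a dynamic (top) archive at exit.
        run("java", "-XX:ArchiveClassesAtExit=top.jsa", "-cp", "app.jar", "Hello");
        // (2) Reuse the dynamic archive on subsequent runs.
        run("java", "-XX:SharedArchiveFile=top.jsa", "-cp", "app.jar", "Hello");
    }
}
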
--- a/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveTestBase.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/dynamicArchive/DynamicArchiveTestBase.java	Thu Nov 14 13:50:03 2019 +0000
@@ -134,7 +134,7 @@
      * Dump the base archive. The JDK's default class list is used (unless otherwise specified
      * in cmdLineSuffix).
      */
-    public static void dumpBaseArchive(String baseArchiveName, String ... cmdLineSuffix)
+    public static OutputAnalyzer dumpBaseArchive(String baseArchiveName, String ... cmdLineSuffix)
         throws Exception
     {
         CDSOptions opts = new CDSOptions();
@@ -143,6 +143,7 @@
         opts.addSuffix("-Djava.class.path=");
         OutputAnalyzer out = CDSTestUtils.createArchive(opts);
         CDSTestUtils.checkDump(out);
+        return out;
     }
 
     /**
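
With dumpBaseArchive now returning the dump-time output, subclasses can assert on it directly. A minimal sketch, assuming the test library is on the classpath (class and method names hypothetical):

public class BaseDumpCheck extends DynamicArchiveTestBase {
    static void demo() throws Exception {
        jdk.test.lib.process.OutputAnalyzer out =
            dumpBaseArchive(getNewArchiveName("base"), "-Xlog:cds=debug");
        out.shouldHaveExitValue(0); // checkDump() already ran; extra asserts are now possible
    }
}
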
--- a/test/hotspot/jtreg/runtime/cds/appcds/sharedStrings/IncompatibleOptions.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/cds/appcds/sharedStrings/IncompatibleOptions.java	Thu Nov 14 13:50:03 2019 +0000
@@ -112,9 +112,6 @@
         // incompatible GCs
         testDump(2, "-XX:+UseParallelGC", "", GC_WARNING, false);
         testDump(3, "-XX:+UseSerialGC", "", GC_WARNING, false);
-        if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
-            testDump(4, "-XX:+UseConcMarkSweepGC", "", GC_WARNING, false);
-        }
 
         // ======= archive with compressed oops, run w/o
         testDump(5, "-XX:+UseG1GC", "-XX:+UseCompressedOops", null, false);
@@ -125,9 +122,6 @@
         // Still run, to ensure no crash or exception
         testExec(6, "-XX:+UseParallelGC", "", "", false);
         testExec(7, "-XX:+UseSerialGC", "", "", false);
-        if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
-            testExec(8, "-XX:+UseConcMarkSweepGC", "", "", false);
-        }
 
         // Test various oops encodings, by varying ObjectAlignmentInBytes and heap sizes
         testDump(9, "-XX:+UseG1GC", "-XX:ObjectAlignmentInBytes=8", null, false);
--- a/test/hotspot/jtreg/runtime/execstack/TestMT.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/execstack/TestMT.java	Thu Nov 14 13:50:03 2019 +0000
@@ -78,7 +78,7 @@
         public void run() {
             for (int i = 0; i < 10; ++i) {
                 TestMT.run(getName());
-                yield();
+                Thread.yield();
             }
         }
     }
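
The yield() to Thread.yield() renames here and in the files below track the switch-expression work: since 'yield' became a restricted identifier, an unqualified method invocation named yield is a compile-time error, so the call must be qualified. A minimal sketch (class name hypothetical):

public class YieldSketch extends Thread {
    @Override
    public void run() {
        // An unqualified yield() call no longer compiles, even inside a
        // Thread subclass; the invocation must be qualified:
        Thread.yield();
    }

    public static void main(String[] args) throws InterruptedException {
        Thread t = new YieldSketch();
        t.start();
        t.join();
    }
}
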
--- a/test/hotspot/jtreg/runtime/testlibrary/ClassUnloadCommon.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/runtime/testlibrary/ClassUnloadCommon.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,7 +62,7 @@
     }
 
     public static void triggerUnloading() {
-        allocateMemory(16 * 1024); // yg size is 8m with cms, force young collection
+        allocateMemory(16 * 1024); // force young collection
         System.gc();
     }
 
--- a/test/hotspot/jtreg/serviceability/jvmti/HeapMonitor/MyPackage/HeapMonitorGCCMSTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2018, Google and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package MyPackage;
-
-/**
- * @test
- * @summary Verifies the JVMTI Heap Monitor Statistics using CMS GC
- * @build Frame HeapMonitor
- * @requires vm.gc == "ConcMarkSweep" | vm.gc == "null"
- * @requires !vm.graal.enabled
- * @compile HeapMonitorGCCMSTest.java
- * @run main/othervm/native -agentlib:HeapMonitorTest -XX:+UseConcMarkSweepGC MyPackage.HeapMonitorGCTest
- */
--- a/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/TestIntConstant.java	Thu Nov 14 13:50:03 2019 +0000
@@ -64,11 +64,10 @@
             // with names and the values derived from enums and #define preprocessor
             // macros in hotspot.
             expStrMap.put("intConstant", List.of(
-                 "CollectedHeap::G1 4",
+                 "CollectedHeap::G1 3",
                  "RUNNABLE 2",
                  "Deoptimization::Reason_class_check 4",
                  "InstanceKlass::_misc_is_unsafe_anonymous 32",
-                 "Generation::ParNew 1",
                  "_thread_uninitialized 0"));
             expStrMap.put("intConstant _temp_constant", List.of(
                  "intConstant _temp_constant 45"));
--- a/test/hotspot/jtreg/serviceability/sa/TestUniverse.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/TestUniverse.java	Thu Nov 14 13:50:03 2019 +0000
@@ -63,10 +63,6 @@
             expStrings.add("eden");
             break;
 
-        case ConcMarkSweep:
-            expStrings.add("Gen 1: concurrent mark-sweep generation");
-            break;
-
         case G1:
             expStrings.add("garbage-first heap");
             expStrings.add("region size");
@@ -112,7 +108,7 @@
         }
 
         if (Compiler.isGraalEnabled()) {
-            if (gc == GC.ConcMarkSweep || gc == GC.Epsilon || gc == GC.Z || gc == GC.Shenandoah) {
+            if (gc == GC.Epsilon || gc == GC.Z || gc == GC.Shenandoah) {
                 // Not supported
                 System.out.println ("Skipped testing of " + gc + "GC, not supported by Graal");
                 return false;
--- a/test/hotspot/jtreg/vmTestbase/gc/gctests/gctest02/gctest02.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/gc/gctests/gctest02/gctest02.java	Thu Nov 14 13:50:03 2019 +0000
@@ -180,7 +180,7 @@
                 while ( ThreadCount.get() > 0 ) {
                         int buf[] = new int[32];
                         {
-                                                yield();
+                                                Thread.yield();
                         }
                 }
         }
--- a/test/hotspot/jtreg/vmTestbase/jit/regression/b4446672/b4446672.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/jit/regression/b4446672/b4446672.java	Thu Nov 14 13:50:03 2019 +0000
@@ -76,7 +76,7 @@
         System.out.println ("GCThread synchronized.");
               while (!done) {
                 gcing=true;
-                yield();
+                Thread.yield();
                 System.gc();
               }
             }
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_0_1/TestDescription.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_0_1/TestDescription.java	Thu Nov 14 13:50:03 2019 +0000
@@ -32,7 +32,6 @@
  * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
  * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
  * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
  * @requires vm.gc != "Z"
  * @library /vmTestbase /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_10_20/TestDescription.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_10_20/TestDescription.java	Thu Nov 14 13:50:03 2019 +0000
@@ -32,7 +32,6 @@
  * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
  * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
  * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
  * @requires vm.gc != "Z"
  * @library /vmTestbase /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_70_80/TestDescription.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_70_80/TestDescription.java	Thu Nov 14 13:50:03 2019 +0000
@@ -32,7 +32,6 @@
  * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
  * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
  * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
  * @requires vm.gc != "Z"
  * @library /vmTestbase /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_99_100/TestDescription.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/metaspace/gc/watermark_99_100/TestDescription.java	Thu Nov 14 13:50:03 2019 +0000
@@ -32,7 +32,6 @@
  * @comment Don't run test in configurations where we can't reliably count number of metaspace triggered GCs
  * @requires vm.gc != null | !vm.opt.final.ClassUnloadingWithConcurrentMark
  * @requires vm.gc != "G1" | !vm.opt.final.ClassUnloadingWithConcurrentMark
- * @requires vm.gc != "ConcMarkSweep"
  * @requires vm.gc != "Z"
  * @library /vmTestbase /test/lib
  * @run driver jdk.test.lib.FileInstaller . .
--- a/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/jvmti/scenarios/general_functions/GF08/gf08t001/TestDriver.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,9 +44,9 @@
  *    First time, with "setVerboseMode=yes" agent mode. Second
  *    time, with "setVerboseMode=no" agent mode and with
  *    "-verbose:gc" VM option. In both cases the output is
- *    searched for 'Full GC' string, unless ExplicitGCInvokesConcurrent
- *    is enabled and G1 or CMS GCs are enbled. If ExplicitGCInvokesConcurrent and
- *    either G1 or CMS GCs are enbled the test searches for 'GC' string in output.
+ *    searched for the 'Pause Full' string, unless both
+ *    ExplicitGCInvokesConcurrent and G1 are enabled, in which case the
+ *    test searches for the 'GC' string in the output.
  *    The test fails if this string is not found in the output.
  * COMMENTS
  *
@@ -70,18 +70,17 @@
         sun.hotspot.WhiteBox wb = sun.hotspot.WhiteBox.getWhiteBox();
         Boolean isExplicitGCInvokesConcurrentOn = wb.getBooleanVMFlag("ExplicitGCInvokesConcurrent");
         Boolean isUseG1GCon = wb.getBooleanVMFlag("UseG1GC");
-        Boolean isUseConcMarkSweepGCon = wb.getBooleanVMFlag("UseConcMarkSweepGC");
         Boolean isUseZGCon = wb.getBooleanVMFlag("UseZGC");
         Boolean isShenandoahGCon = wb.getBooleanVMFlag("UseShenandoahGC");
         Boolean isUseEpsilonGCon = wb.getBooleanVMFlag("UseEpsilonGC");
 
         if (Compiler.isGraalEnabled() &&
-            (isUseConcMarkSweepGCon || isUseZGCon || isUseEpsilonGCon || isShenandoahGCon)) {
+            (isUseZGCon || isUseEpsilonGCon || isShenandoahGCon)) {
             return; // Graal does not support these GCs
         }
 
         String keyPhrase;
-        if ((isExplicitGCInvokesConcurrentOn && (isUseG1GCon || isUseConcMarkSweepGCon)) || isUseZGCon || isShenandoahGCon) {
+        if ((isExplicitGCInvokesConcurrentOn && isUseG1GCon) || isUseZGCon || isShenandoahGCon) {
             keyPhrase = "GC";
         } else {
             keyPhrase = "Pause Full";
--- a/test/hotspot/jtreg/vmTestbase/nsk/monitoring/stress/thread/strace001.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/monitoring/stress/thread/strace001.java	Thu Nov 14 13:50:03 2019 +0000
@@ -362,7 +362,7 @@
 
         currentDepth++;
         if (maxDepth > currentDepth) {
-            yield();
+            Thread.yield();
             if (mixed) {
                 int result = recursionNative(maxDepth, currentDepth, true);
 
--- a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/JDIEventsDebuggee.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/JDIEventsDebuggee.java	Thu Nov 14 13:50:03 2019 +0000
@@ -232,7 +232,7 @@
 
         public void run() {
             while (!startExecution)
-                yield();
+                Thread.yield();
 
             for (int i = 0; (i < actionsNumber) && !stopExecution; i++)
                 executor.doEventAction();
--- a/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/MonitorEventsDebuggee.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/jdi/MonitorEventsDebuggee.java	Thu Nov 14 13:50:03 2019 +0000
@@ -53,7 +53,7 @@
         public void run() {
             // wait when interrupted thread switches state to 'TIMED_WAITING'
             while ((threadToInterrupt.getState() != Thread.State.WAITING) && !exitedFromWait) {
-                yield();
+                Thread.yield();
             }
 
             // threadToInterrupt 'spuriously' exited from wait()
@@ -236,7 +236,7 @@
         public void run() {
             // wait when blocked thread switches state to 'BLOCKED'
             while (blockedThread.getState() != Thread.State.BLOCKED)
-                yield();
+                Thread.yield();
 
             lockingThread.releaseLock();
         }
--- a/test/hotspot/jtreg/vmTestbase/nsk/share/jpda/StateTestThread.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/jpda/StateTestThread.java	Thu Nov 14 13:50:03 2019 +0000
@@ -111,7 +111,7 @@
             start();
 
             while (!isRunning)
-                yield();
+                Thread.yield();
 
             break;
         case 2:
@@ -119,7 +119,7 @@
             isRunning = false;
 
             while (this.getState() != Thread.State.TIMED_WAITING)
-                yield();
+                Thread.yield();
 
             break;
         case 3:
@@ -129,7 +129,7 @@
             interrupt();
 
             while (getState() != Thread.State.WAITING)
-                yield();
+                Thread.yield();
 
             break;
         case 4:
@@ -141,7 +141,7 @@
             }
 
             while (!readyToBeBlocked || (getState() != Thread.State.BLOCKED))
-                yield();
+                Thread.yield();
 
             break;
         case 5:
--- a/test/hotspot/jtreg/vmTestbase/nsk/share/locks/LockingThread.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/locks/LockingThread.java	Thu Nov 14 13:50:03 2019 +0000
@@ -325,7 +325,7 @@
                 throw new TestBug("Locking thread can't reach required state (state: " + requiredState + " wasn't reached) in 1 minute");
             }
 
-            yield();
+            Thread.yield();
         }
 
         requiredState = null;
--- a/test/hotspot/jtreg/vmTestbase/nsk/share/locks/MonitorLockingThread.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/locks/MonitorLockingThread.java	Thu Nov 14 13:50:03 2019 +0000
@@ -78,7 +78,7 @@
              * should already occur) and then force MonitorLockingThread to release lock
              */
             while (blockedThread.getState() != Thread.State.BLOCKED)
-                yield();
+                Thread.yield();
 
             lockingThread.releaseLock();
         }
@@ -98,7 +98,7 @@
         synchronized (lockToHold) {
             holdsLock = true;
             while (isRunning)
-                yield();
+                Thread.yield();
         }
         holdsLock = false;
     }
@@ -106,12 +106,12 @@
     public void releaseLock() {
         isRunning = false;
         while (holdsLock)
-            yield();
+            Thread.yield();
     }
 
     public void acquireLock() {
         start();
         while (!holdsLock)
-            yield();
+            Thread.yield();
     }
 }
--- a/test/hotspot/jtreg/vmTestbase/nsk/share/runner/ThreadsRunner.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/share/runner/ThreadsRunner.java	Thu Nov 14 13:50:03 2019 +0000
@@ -81,7 +81,7 @@
                 stresser.start(runParams.getIterations());
                 while (!this.isInterrupted() && stresser.iteration()) {
                     test.run();
-                    yield();
+                    Thread.yield();
                 }
                 waitForOtherThreads();
             } catch (OutOfMemoryError oom) {
--- a/test/hotspot/jtreg/vmTestbase/nsk/stress/jni/GarbageGenerator.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/stress/jni/GarbageGenerator.java	Thu Nov 14 13:50:03 2019 +0000
@@ -84,7 +84,7 @@
         while (!done) {
             for (g = 0; g < ringSize; g++) {
                 gr.add(allocSize);
-                yield();
+                Thread.yield();
             }
             gr.discard();
             try {
--- a/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace001.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace001.java	Thu Nov 14 13:50:03 2019 +0000
@@ -295,7 +295,7 @@
         }
 
         if (strace001.DEPTH - currentDepth > 0) {
-            yield();
+            Thread.yield();
             recursiveMethod();
         }
 
--- a/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace002.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace002.java	Thu Nov 14 13:50:03 2019 +0000
@@ -304,7 +304,7 @@
         }
 
         if (strace002.DEPTH - currentDepth > 0) {
-            yield();
+            Thread.yield();
             recursiveMethod();
         }
 
--- a/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace005.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace005.java	Thu Nov 14 13:50:03 2019 +0000
@@ -387,7 +387,7 @@
 
         if (strace005.DEPTH - currentDepth > 0) {
             try {
-                yield();
+                Thread.yield();
                 recursiveMethod2();
             } catch (StackOverflowError e) {
                 // ignore this exception
--- a/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace006.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/stress/strace/strace006.java	Thu Nov 14 13:50:03 2019 +0000
@@ -331,7 +331,7 @@
 
         if (strace006.DEPTH - currentDepth > 0) {
             try {
-                yield();
+                Thread.yield();
                 recursiveMethod2();
             } catch (StackOverflowError e) {
                 // ignore this exception
--- a/test/hotspot/jtreg/vmTestbase/nsk/stress/thread/thread005.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/stress/thread/thread005.java	Thu Nov 14 13:50:03 2019 +0000
@@ -160,7 +160,7 @@
      */
     public void run() {
         while (!GO && !timeout())
-            yield();
+            Thread.yield();
         while (!STOP && !timeout())
             ;
     }
--- a/test/hotspot/jtreg/vmTestbase/nsk/stress/thread/thread006.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/hotspot/jtreg/vmTestbase/nsk/stress/thread/thread006.java	Thu Nov 14 13:50:03 2019 +0000
@@ -176,7 +176,7 @@
      */
     public void run() {
         while (!GO && !timeout())
-            yield();
+            Thread.yield();
         while (!STOP && !timeout())
             ;
     }
--- a/test/jdk/ProblemList.txt	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/ProblemList.txt	Thu Nov 14 13:50:03 2019 +0000
@@ -837,6 +837,8 @@
 
 tools/pack200/CommandLineTests.java                             8059906 generic-all
 
+tools/jlink/JLinkReproducibleTest.java                          8217166 windows-all
+
 ############################################################################
 
 # jdk_jdi
@@ -861,7 +863,6 @@
 
 # svc_tools
 
-sun/tools/jstat/jstatClassloadOutput1.sh                        8173942 generic-all
 sun/tools/jhsdb/BasicLauncherTest.java                          8193639,8211767 solaris-all,linux-ppc64,linux-ppc64le
 sun/tools/jhsdb/HeapDumpTest.java                               8193639 solaris-all
 sun/tools/jhsdb/HeapDumpTestWithActiveProcess.java              8230731,8001227 windows-all
--- a/test/jdk/com/sun/jdi/InvokeHangTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/com/sun/jdi/InvokeHangTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -64,7 +64,7 @@
     // This is called from the debugger via invokeMethod
     public double invokeee() {
         System.out.println("Debuggee: invokeee in thread "+Thread.currentThread().toString());
-        yield();
+        Thread.yield();
         return longMethod(2);
     }
     public double longMethod(int n) {
--- a/test/jdk/com/sun/jdi/JdwpListenTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/com/sun/jdi/JdwpListenTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -54,6 +54,10 @@
 
     private static final boolean IsWindows = System.getProperty("os.name").toLowerCase().contains("windows");
 
+    // Set to true to allow testing of attaching from a wrong address (expected to fail).
+    // It is off by default because it increases test time and can interfere with other tests (see JDK-8231915).
+    private static boolean allowNegativeTesting = false;
+
     public static void main(String[] args) throws Exception {
         List<InetAddress> addresses = getAddresses();
 
@@ -87,6 +91,11 @@
             throws IOException {
         log("\nTest: listen at " + listenAddress + ", attaching from " + connectAddress
                 + ", expected: " + (expectedResult ? "SUCCESS" : "FAILURE"));
+        if (!expectedResult && !allowNegativeTesting) {
+            log("SKIPPED: negative testing is disabled");
+            return;
+        }
+
         log("Starting listening debuggee at " + listenAddress);
         try (Debuggee debuggee = Debuggee.launcher("HelloWorld").setAddress(listenAddress + ":0").launch()) {
             log("Debuggee is listening on " + listenAddress + ":" + debuggee.getAddress());
@@ -103,6 +112,7 @@
                 }
             }
         }
+        log("PASSED");
     }
 
     private static void addAddr(List<InetAddress> list, InetAddress addr) {
--- a/test/jdk/com/sun/jdi/SimulResumerTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/com/sun/jdi/SimulResumerTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -71,7 +71,7 @@
 
     public void bkpt1(int i) {
         synchronized(name1) {
-            yield();
+            Thread.yield();
         }
     }
 
@@ -85,7 +85,7 @@
 
     public void bkpt2(int i) {
         synchronized(name2) {
-            yield();
+            Thread.yield();
         }
     }
 
--- a/test/jdk/com/sun/jdi/TwoThreadsTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/com/sun/jdi/TwoThreadsTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -70,7 +70,7 @@
     }
 
     public void bkpt1(int i) {
-        yield();
+        Thread.yield();
     }
 
     public void run1() {
@@ -82,7 +82,7 @@
     }
 
     public void bkpt2(int i) {
-        yield();
+        Thread.yield();
     }
 
     public void run2() {
--- a/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/com/sun/management/HotSpotDiagnosticMXBean/CheckOrigin.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,6 @@
  * @test
  * @bug 8028994
  * @author Staffan Larsen
- * @comment Graal does not support CMS
- * @requires !vm.graal.enabled
  * @library /test/lib
  * @modules jdk.attach/sun.tools.attach
  *          jdk.management
@@ -63,7 +61,7 @@
             ProcessBuilder pb = ProcessTools.
                 createJavaProcessBuilder(
                     "--add-exports", "jdk.attach/sun.tools.attach=ALL-UNNAMED",
-                    "-XX:+UseConcMarkSweepGC",  // this will cause MaxNewSize to be FLAG_SET_ERGO
+                    "-XX:+UseG1GC",  // this will cause MaxNewSize to be FLAG_SET_ERGO
                     "-XX:+UseCodeAging",
                     "-XX:+UseCerealGC",         // Should be ignored.
                     "-XX:Flags=" + flagsFile.getAbsolutePath(),
@@ -73,8 +71,7 @@
                     "-runtests");
 
             Map<String, String> env = pb.environment();
-            // "UseCMSGC" should be ignored.
-            env.put("_JAVA_OPTIONS", "-XX:+CheckJNICalls -XX:+UseCMSGC");
+            env.put("_JAVA_OPTIONS", "-XX:+CheckJNICalls");
             // "UseGOneGC" should be ignored.
             env.put("JAVA_TOOL_OPTIONS", "-XX:+IgnoreUnrecognizedVMOptions "
                 + "-XX:+PrintVMOptions -XX:+UseGOneGC");
@@ -110,7 +107,7 @@
             checkOrigin("PrintVMQWaitTime", Origin.CONFIG_FILE);
             // Set through j.l.m
             checkOrigin("HeapDumpOnOutOfMemoryError", Origin.MANAGEMENT);
-            // Should be set by the VM, when we set UseConcMarkSweepGC
+            // Should be set by the VM when we set UseG1GC
             checkOrigin("MaxNewSize", Origin.ERGONOMIC);
             // Set using attach
             checkOrigin("HeapDumpPath", Origin.ATTACH_ON_DEMAND);
--- a/test/jdk/java/awt/Graphics2D/MTGraphicsAccessTest/MTGraphicsAccessTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/awt/Graphics2D/MTGraphicsAccessTest/MTGraphicsAccessTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -197,7 +197,7 @@
                 while (!done) {
                     try {
                         testRunnable.run();
-                        yield();
+                        Thread.yield();
                     } catch (Throwable t) {
                         numexceptions++;
                         t.printStackTrace();
--- a/test/jdk/java/lang/String/Formatted.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/String/Formatted.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * bug 8203444
  * @summary Unit tests for instance versions of String#format
- * @compile --enable-preview -source 14 Formatted.java
+ * @compile --enable-preview -source ${jdk.version} Formatted.java
  * @run main/othervm --enable-preview Formatted
  */
 
--- a/test/jdk/java/lang/String/StripIndent.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/String/StripIndent.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * @bug 8223775
  * @summary This exercises String#stripIndent patterns and limits.
- * @compile --enable-preview -source 14 StripIndent.java
+ * @compile --enable-preview -source ${jdk.version} StripIndent.java
  * @run main/othervm --enable-preview StripIndent
  */
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/lang/String/TEST.properties	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,1 @@
+allowSmartActionArgs=true
--- a/test/jdk/java/lang/String/TranslateEscapes.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/String/TranslateEscapes.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * @bug 8223780
  * @summary This exercises String#translateEscapes patterns and limits.
- * @compile --enable-preview -source 14 TranslateEscapes.java
+ * @compile --enable-preview -source ${jdk.version} TranslateEscapes.java
  * @run main/othervm --enable-preview TranslateEscapes
  */
 
--- a/test/jdk/java/lang/invoke/TryFinallyTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/invoke/TryFinallyTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -24,8 +24,8 @@
  */
 
 /* @test
- * @bug 8139885 8150824 8150825 8194238
- * @run testng/othervm -ea -esa test.java.lang.invoke.TryFinallyTest
+ * @bug 8139885 8150824 8150825 8194238 8233920
+ * @run testng/othervm -ea -esa -Xverify:all test.java.lang.invoke.TryFinallyTest
  */
 
 package test.java.lang.invoke;
@@ -55,6 +55,41 @@
         assertEquals("Hello, world!", hello.invoke("world"));
     }
 
+    @DataProvider
+    static Object[][] tryFinallyArgs() {
+        return new Object[][] {
+                { boolean.class, true },
+                { byte.class, (byte) 2 },
+                { short.class, (short) 2 },
+                { char.class, (char) 2 },
+                { int.class, 2 },
+                { long.class, 2L },
+                { float.class, 2f },
+                { double.class, 2D },
+                { Object.class, new Object() }
+        };
+    }
+
+    @Test(dataProvider = "tryFinallyArgs")
+    public static void testTryFinally(Class<?> argType, Object arg) throws Throwable {
+        MethodHandle identity = MethodHandles.identity(argType);
+        MethodHandle tryFinally = MethodHandles.tryFinally(
+                identity,
+                MethodHandles.dropArguments(identity, 0, Throwable.class));
+        assertEquals(methodType(argType, argType), tryFinally.type());
+        assertEquals(arg, tryFinally.invoke(arg));
+    }
+
+    @Test(dataProvider = "tryFinallyArgs", expectedExceptions = TryFinally.T1.class)
+    public static void testTryFinallyException(Class<?> argType, Object arg) throws Throwable {
+        MethodHandle identity = TryFinally.MH_throwingTargetIdentity.asType(methodType(argType, argType));
+        MethodHandle tryFinally = MethodHandles.tryFinally(
+                identity,
+                MethodHandles.dropArguments(identity, 0, TryFinally.T1.class));
+        assertEquals(methodType(argType, argType), tryFinally.type());
+        tryFinally.invoke(arg); // should throw
+    }
+
     @Test
     public static void testTryFinallyVoid() throws Throwable {
         MethodHandle tfVoid = MethodHandles.tryFinally(TryFinally.MH_print, TryFinally.MH_printMore);
@@ -175,6 +210,10 @@
             throw new T1();
         }
 
+        static Object throwingTargetIdentity(Object o) throws Throwable {
+            throw new T1();
+        }
+
         static void catchingCleanup(T2 t) throws Throwable {
         }
 
@@ -189,6 +228,7 @@
         static final MethodType MT_voidTarget = methodType(void.class);
         static final MethodType MT_voidCleanup = methodType(void.class, Throwable.class);
         static final MethodType MT_throwingTarget = methodType(void.class);
+        static final MethodType MT_throwingTargetIdentity = methodType(Object.class, Object.class);
         static final MethodType MT_catchingCleanup = methodType(void.class, T2.class);
 
         static final MethodHandle MH_greet;
@@ -200,6 +240,7 @@
         static final MethodHandle MH_voidTarget;
         static final MethodHandle MH_voidCleanup;
         static final MethodHandle MH_throwingTarget;
+        static final MethodHandle MH_throwingTargetIdentity;
         static final MethodHandle MH_catchingCleanup;
 
         static final MethodHandle MH_dummyTarget;
@@ -219,6 +260,7 @@
                 MH_voidTarget = LOOKUP.findStatic(TRY_FINALLY, "voidTarget", MT_voidTarget);
                 MH_voidCleanup = LOOKUP.findStatic(TRY_FINALLY, "voidCleanup", MT_voidCleanup);
                 MH_throwingTarget = LOOKUP.findStatic(TRY_FINALLY, "throwingTarget", MT_throwingTarget);
+                MH_throwingTargetIdentity = LOOKUP.findStatic(TRY_FINALLY, "throwingTargetIdentity", MT_throwingTargetIdentity);
                 MH_catchingCleanup = LOOKUP.findStatic(TRY_FINALLY, "catchingCleanup", MT_catchingCleanup);
                 MH_dummyTarget = MethodHandles.dropArguments(MH_voidTarget, 0, int.class, long.class, Object.class,
                         int.class, long.class, Object.class);
--- a/test/jdk/java/lang/management/GarbageCollectorMXBean/GcInfoCompositeType.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/management/GarbageCollectorMXBean/GcInfoCompositeType.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
  * @run     main/othervm -XX:-ExplicitGCInvokesConcurrent GcInfoCompositeType
  */
 // Passing "-XX:-ExplicitGCInvokesConcurrent" to force System.gc()
-// run on foreground when CMS is used and prevent situations when "GcInfo"
+// run in the foreground when a concurrent collector is used, and to prevent situations where "GcInfo"
 // is missing even though System.gc() was successfully processed.
 
 import java.util.*;
--- a/test/jdk/java/lang/management/MemoryMXBean/CollectionUsageThreshold.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/management/MemoryMXBean/CollectionUsageThreshold.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -76,9 +76,6 @@
         RunUtil.runTestClearGcOpts(main, "-XX:+UseSerialGC");
         RunUtil.runTestClearGcOpts(main, "-XX:+UseParallelGC");
         RunUtil.runTestClearGcOpts(main, "-XX:+UseG1GC");
-        if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
-            RunUtil.runTestClearGcOpts(main, "-XX:+UseConcMarkSweepGC");
-        }
     }
 
     static class PoolRecord {
--- a/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,9 +84,6 @@
         traceTest(classMain + ", -XX:+UseSerialGC", nmFlag, lpFlag, "-XX:+UseSerialGC");
         traceTest(classMain + ", -XX:+UseParallelGC", nmFlag, lpFlag, "-XX:+UseParallelGC");
         traceTest(classMain + ", -XX:+UseG1GC", nmFlag, lpFlag, "-XX:+UseG1GC", g1Flag);
-        if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
-            traceTest(classMain + ", -XX:+UseConcMarkSweepGC", nmFlag, lpFlag, "-XX:+UseConcMarkSweepGC");
-        }
     }
 
     /*
@@ -169,15 +166,10 @@
     }
 
     static class TestListener implements NotificationListener {
-        private boolean isRelaxed = false;
         private int triggers = 0;
         private final long[] count = new long[NUM_TRIGGERS * 2];
         private final long[] usedMemory = new long[NUM_TRIGGERS * 2];
 
-        public TestListener() {
-            isRelaxed = ManagementFactory.getRuntimeMXBean().getInputArguments().contains("-XX:+UseConcMarkSweepGC");
-        }
-
         @Override
         public void handleNotification(Notification notif, Object handback) {
             MemoryNotificationInfo minfo = MemoryNotificationInfo.
@@ -212,11 +204,7 @@
         }
 
         private boolean checkValue(long value, int target) {
-            if (!isRelaxed) {
-                return value == target;
-            } else {
-                return value >= target;
-            }
+            return value == target;
         }
     }
 
--- a/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest2.sh	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/management/MemoryMXBean/LowMemoryTest2.sh	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -59,7 +59,6 @@
 
 go -noclassgc -XX:MaxMetaspaceSize=32m -XX:+UseSerialGC LowMemoryTest2
 go -noclassgc -XX:MaxMetaspaceSize=32m -XX:+UseParallelGC LowMemoryTest2
-go -noclassgc -XX:MaxMetaspaceSize=32m -XX:+UseConcMarkSweepGC LowMemoryTest2
 
 # Test class metaspace - might hit MaxMetaspaceSize instead if
 # UseCompressedClassPointers is off or if 32 bit.
--- a/test/jdk/java/lang/management/MemoryMXBean/MemoryManagementConcMarkSweepGC.sh	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-#
-# Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-#
-# @test
-# @bug     4530538
-# @summary Run MemoryManagement test with concurrent mark sweep GC
-# @author  Mandy Chung
-#
-# @requires (vm.gc=="ConcMarkSweep" | vm.gc=="null") & !vm.graal.enabled
-#
-# @run build MemoryManagement
-# @run shell/timeout=600 MemoryManagementConcMarkSweepGC.sh
-#
-
-#Set appropriate jdk
-
-if [ ! -z "${TESTJAVA}" ] ; then
-     jdk="$TESTJAVA"
-else
-     echo "--Error: TESTJAVA must be defined as the pathname of a jdk to test."
-     exit 1
-fi
-
-runOne()
-{
-   echo "runOne $@"
-   $TESTJAVA/bin/java ${TESTVMOPTS} -classpath $TESTCLASSES $@ || exit 2
-}
-
-# Test MemoryManagement with concurrent collector
-runOne -XX:+UseConcMarkSweepGC MemoryManagement
-
-exit 0
--- a/test/jdk/java/lang/management/MemoryMXBean/PendingAllGC.sh	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/management/MemoryMXBean/PendingAllGC.sh	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,4 @@
 # Test Pending with parallel scavenger collector
 runOne -XX:+UseParallelGC Pending 
 
-# Test Pending with concurrent collector
-runOne -XX:+UseConcMarkSweepGC Pending
-
 exit 0
--- a/test/jdk/java/lang/management/MemoryMXBean/ResetPeakMemoryUsage.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/lang/management/MemoryMXBean/ResetPeakMemoryUsage.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,9 +63,7 @@
         final String main = "ResetPeakMemoryUsage$TestMain";
         final String ms = "-Xms256m";
         final String mn = "-Xmn8m";
-        if (!Compiler.isGraalEnabled()) { // Graal does not support CMS
-            RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseConcMarkSweepGC");
-        }
+
         RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseParallelGC");
         RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseG1GC", "-XX:G1HeapRegionSize=1m");
         RunUtil.runTestClearGcOpts(main, ms, mn, "-XX:+UseSerialGC",
--- a/test/jdk/java/net/CookieHandler/B6791927.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/net/CookieHandler/B6791927.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,8 +23,9 @@
 
 /**
  * @test
- * @bug 6791927
+ * @bug 6791927 8233886
  * @summary Wrong Locale in HttpCookie::expiryDate2DeltaSeconds
+ * @run main/othervm B6791927
  */
 
 import java.net.*;
@@ -32,12 +33,14 @@
 import java.util.Locale;
 
 public class B6791927 {
-    public static final void main( String[] aaParamters ) throws Exception{
+    public static final void main(String[] args) throws Exception {
         Locale reservedLocale = Locale.getDefault();
         try {
             // Forces a non US locale
             Locale.setDefault(Locale.FRANCE);
-            List<HttpCookie> cookies = HttpCookie.parse("set-cookie: CUSTOMER=WILE_E_COYOTE; expires=Sat, 09-Nov-2019 23:12:40 GMT");
+            List<HttpCookie> cookies = HttpCookie.parse("set-cookie:" +
+                    " CUSTOMER=WILE_E_COYOTE;" +
+                    " expires=Sat, 09-Nov-2041 23:12:40 GMT");
             if (cookies == null || cookies.isEmpty()) {
                 throw new RuntimeException("No cookie found");
             }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/net/httpclient/AuthFilterCacheTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,298 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.io.IOException;
+import java.net.*;
+import java.net.http.HttpClient;
+import java.net.http.HttpRequest;
+import java.net.http.HttpResponse;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.*;
+import java.util.concurrent.atomic.AtomicLong;
+
+import com.sun.net.httpserver.HttpServer;
+import com.sun.net.httpserver.HttpsConfigurator;
+import com.sun.net.httpserver.HttpsServer;
+import org.testng.annotations.AfterClass;
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.DataProvider;
+import org.testng.annotations.Test;
+
+import javax.net.ssl.SSLContext;
+
+/**
+ * @test
+ * @bug 8232853
+ * @summary AuthenticationFilter.Cache::remove may throw ConcurrentModificationException
+ * @library /test/lib http2/server
+ * @build jdk.test.lib.net.SimpleSSLContext HttpServerAdapters DigestEchoServer HttpRedirectTest
+ * @modules java.net.http/jdk.internal.net.http.common
+ * java.net.http/jdk.internal.net.http.frame
+ * java.net.http/jdk.internal.net.http.hpack
+ * java.logging
+ * java.base/sun.net.www.http
+ * java.base/sun.net.www
+ * java.base/sun.net
+ * @run testng/othervm -Dtest.requiresHost=true
+ * -Djdk.httpclient.HttpClient.log=headers
+ * -Djdk.internal.httpclient.debug=false
+ * AuthFilterCacheTest
+ */
+
+public class AuthFilterCacheTest implements HttpServerAdapters {
+
+    static final String RESPONSE_BODY = "Hello World!";
+    static final int REQUEST_COUNT = 5;
+    static final int URI_COUNT = 6;
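+    // Each authorized response is held in the server handler until all
+    // REQUEST_COUNT * URI_COUNT responses are ready, maximizing concurrent
+    // traffic through the authentication filter's cache.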
+    static final CyclicBarrier barrier = new CyclicBarrier(REQUEST_COUNT * URI_COUNT);
+    static final SSLContext context;
+
+    static {
+        try {
+            context = new jdk.test.lib.net.SimpleSSLContext().get();
+            SSLContext.setDefault(context);
+        } catch (Exception x) {
+            throw new ExceptionInInitializerError(x);
+        }
+    }
+
+    HttpTestServer http1Server;
+    HttpTestServer http2Server;
+    HttpTestServer https1Server;
+    HttpTestServer https2Server;
+    DigestEchoServer.TunnelingProxy proxy;
+    URI http1URI;
+    URI https1URI;
+    URI http2URI;
+    URI https2URI;
+    InetSocketAddress proxyAddress;
+    ProxySelector proxySelector;
+    MyAuthenticator auth;
+    HttpClient client;
+    Executor executor = Executors.newCachedThreadPool();
+
+    @DataProvider(name = "uris")
+    Object[][] testURIs() {
+        return new Object[][]{
+                {List.of(http1URI.resolve("direct/orig/"),
+                        https1URI.resolve("direct/orig/"),
+                        https1URI.resolve("proxy/orig/"),
+                        http2URI.resolve("direct/orig/"),
+                        https2URI.resolve("direct/orig/"),
+                        https2URI.resolve("proxy/orig/"))}
+        };
+    }
+
+    public HttpClient newHttpClient(ProxySelector ps, Authenticator auth) {
+        HttpClient.Builder builder = HttpClient
+                .newBuilder()
+                .sslContext(context)
+                .authenticator(auth)
+                .proxy(ps);
+        return builder.build();
+    }
+
+    @BeforeClass
+    public void setUp() throws Exception {
+        try {
+            InetSocketAddress sa =
+                    new InetSocketAddress(InetAddress.getLoopbackAddress(), 0);
+            auth = new MyAuthenticator();
+
+            // HTTP/1.1
+            HttpServer server1 = HttpServer.create(sa, 0);
+            server1.setExecutor(executor);
+            http1Server = HttpTestServer.of(server1);
+            http1Server.addHandler(new TestHandler(), "/AuthFilterCacheTest/http1/");
+            http1Server.start();
+            http1URI = new URI("http://" + http1Server.serverAuthority()
+                    + "/AuthFilterCacheTest/http1/");
+
+            // HTTPS/1.1
+            HttpsServer sserver1 = HttpsServer.create(sa, 100);
+            sserver1.setExecutor(executor);
+            sserver1.setHttpsConfigurator(new HttpsConfigurator(context));
+            https1Server = HttpTestServer.of(sserver1);
+            https1Server.addHandler(new TestHandler(), "/AuthFilterCacheTest/https1/");
+            https1Server.start();
+            https1URI = new URI("https://" + https1Server.serverAuthority()
+                    + "/AuthFilterCacheTest/https1/");
+
+            // HTTP/2.0
+            http2Server = HttpTestServer.of(
+                    new Http2TestServer("localhost", false, 0));
+            http2Server.addHandler(new TestHandler(), "/AuthFilterCacheTest/http2/");
+            http2Server.start();
+            http2URI = new URI("http://" + http2Server.serverAuthority()
+                    + "/AuthFilterCacheTest/http2/");
+
+            // HTTPS/2.0
+            https2Server = HttpTestServer.of(
+                    new Http2TestServer("localhost", true, 0));
+            https2Server.addHandler(new TestHandler(), "/AuthFilterCacheTest/https2/");
+            https2Server.start();
+            https2URI = new URI("https://" + https2Server.serverAuthority()
+                    + "/AuthFilterCacheTest/https2/");
+
+            proxy = DigestEchoServer.createHttpsProxyTunnel(
+                    DigestEchoServer.HttpAuthSchemeType.NONE);
+            proxyAddress = proxy.getProxyAddress();
+            proxySelector = new HttpProxySelector(proxyAddress);
+            client = newHttpClient(proxySelector, auth);
+
+            System.out.println("Setup: done");
+        } catch (Exception x) {
+            tearDown();
+            throw x;
+        } catch (Error e) {
+            tearDown();
+            throw e;
+        }
+    }
+
+    @AfterClass
+    public void tearDown() {
+        proxy = stop(proxy, DigestEchoServer.TunnelingProxy::stop);
+        http1Server = stop(http1Server, HttpTestServer::stop);
+        https1Server = stop(https1Server, HttpTestServer::stop);
+        http2Server = stop(http2Server, HttpTestServer::stop);
+        https2Server = stop(https2Server, HttpTestServer::stop);
+        client = null;
+
+        System.out.println("Teardown: done");
+    }
+
+    private interface Stoppable<T> {
+        void stop(T service) throws Exception;
+    }
+
+    static <T> T stop(T service, Stoppable<T> stop) {
+        try {
+            if (service != null) stop.stop(service);
+        } catch (Throwable x) {
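+            // best-effort cleanup: ignore failures while stopping a service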
+        }
+        return null;
+    }
+
+    static class HttpProxySelector extends ProxySelector {
+        private static final List<Proxy> NO_PROXY = List.of(Proxy.NO_PROXY);
+        private final List<Proxy> proxyList;
+
+        HttpProxySelector(InetSocketAddress proxyAddress) {
+            proxyList = List.of(new Proxy(Proxy.Type.HTTP, proxyAddress));
+        }
+
+        @Override
+        public List<Proxy> select(URI uri) {
+            // Our proxy only supports tunneling
+            if (uri.getScheme().equalsIgnoreCase("https")) {
+                if (uri.getPath().contains("/proxy/")) {
+                    return proxyList;
+                }
+            }
+            return NO_PROXY;
+        }
+
+        @Override
+        public void connectFailed(URI uri, SocketAddress sa, IOException ioe) {
+            System.err.println("Connection to proxy failed: " + ioe);
+            System.err.println("Proxy: " + sa);
+            System.err.println("\tURI: " + uri);
+            ioe.printStackTrace();
+        }
+    }
+
+    public static class TestHandler implements HttpTestHandler {
+        static final AtomicLong respCounter = new AtomicLong();
+
+        @Override
+        public void handle(HttpTestExchange t) throws IOException {
+            var count = respCounter.incrementAndGet();
+            System.out.println("Responses handled: " + count);
+            t.getRequestBody().readAllBytes();
+
+            if (t.getRequestMethod().equalsIgnoreCase("GET")) {
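+                // challenge the first, unauthenticated request; once
+                // credentials are present, hold the response at the barrier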
+                if (!t.getRequestHeaders().containsKey("Authorization")) {
+                    t.getResponseHeaders()
+                            .addHeader("WWW-Authenticate", "Basic realm=\"Earth\"");
+                    t.sendResponseHeaders(401, 0);
+                } else {
+                    byte[] resp = RESPONSE_BODY.getBytes(StandardCharsets.UTF_8);
+                    t.sendResponseHeaders(200, resp.length);
+                    try {
+                        barrier.await();
+                    } catch (Exception e) {
+                        throw new IOException(e);
+                    }
+                    t.getResponseBody().write(resp);
+                }
+            }
+            t.close();
+        }
+    }
+
+    void doClient(List<URI> uris) {
+        assert uris.size() == URI_COUNT;
+        barrier.reset();
+        System.out.println("Client opening connection to: " + uris.toString());
+
+        List<CompletableFuture<HttpResponse<String>>> cfs = new ArrayList<>();
+
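+        // fire REQUEST_COUNT requests at every URI without waiting for
+        // completion, so the authentication cache sees concurrent access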
+        for (int i = 0; i < REQUEST_COUNT; i++) {
+            for (URI uri : uris) {
+                HttpRequest req = HttpRequest.newBuilder()
+                        .uri(uri)
+                        .build();
+                cfs.add(client.sendAsync(req, HttpResponse.BodyHandlers.ofString()));
+            }
+        }
+        CompletableFuture.allOf(cfs.toArray(new CompletableFuture[0])).join();
+    }
+
+    static class MyAuthenticator extends Authenticator {
+        private int count = 0;
+
+        MyAuthenticator() {
+            super();
+        }
+
+        public PasswordAuthentication getPasswordAuthentication() {
+            System.out.println("Authenticator called: " + ++count);
+            return new PasswordAuthentication("user" + count,
+                    ("passwordNotCheckedAnyway" + count).toCharArray());
+        }
+
+        public int getCount() {
+            return count;
+        }
+    }
+
+    @Test(dataProvider = "uris")
+    public void test(List<URI> uris) throws Exception {
+        System.out.println("Server listening at " + uris.toString());
+        doClient(uris);
+    }
+}
--- a/test/jdk/java/nio/channels/DatagramChannel/MulticastSendReceiveTests.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/nio/channels/DatagramChannel/MulticastSendReceiveTests.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,11 +22,12 @@
  */
 
 /* @test
- * @bug 4527345 7026376 6633549
+ * @bug 4527345 7026376 6633549 8233435
  * @summary Unit test for DatagramChannel's multicast support
  * @library /test/lib
  * @build jdk.test.lib.NetworkConfiguration
  *        jdk.test.lib.Platform
+ *        jdk.test.lib.net.IPSupport
  *        MulticastSendReceiveTests
  * @run main MulticastSendReceiveTests
  * @run main/othervm -Djava.net.preferIPv4Stack=true MulticastSendReceiveTests
@@ -41,6 +42,7 @@
 import java.io.IOException;
 import java.util.stream.Collectors;
 
+import jdk.test.lib.Platform;
 import jdk.test.lib.NetworkConfiguration;
 import jdk.test.lib.net.IPSupport;
 
@@ -242,23 +244,37 @@
     public static void main(String[] args) throws IOException {
         IPSupport.throwSkippedExceptionIfNonOperational();
 
+        // IPv4 and IPv6 interfaces that support multicasting
         NetworkConfiguration config = NetworkConfiguration.probe();
+        List<NetworkInterface> ip4MulticastInterfaces = config.ip4MulticastInterfaces()
+                .collect(Collectors.toList());
+        List<NetworkInterface> ip6MulticastInterfaces = config.ip6MulticastInterfaces()
+                .collect(Collectors.toList());
 
         // multicast groups used for the test
         InetAddress ip4Group = InetAddress.getByName("225.4.5.6");
         InetAddress ip6Group = InetAddress.getByName("ff02::a");
-        for (NetworkInterface nif: config.ip4MulticastInterfaces()
-                                         .collect(Collectors.toList())) {
+
+        // Platforms that allow IPv6 (dual-stack) sockets to join IPv4 multicast groups
+        boolean canIPv6JoinIPv4Group =
+                Platform.isLinux() ||
+                Platform.isOSX() ||
+                Platform.isSolaris() ||
+                Platform.isWindows();
+
+        for (NetworkInterface nif : ip4MulticastInterfaces) {
             InetAddress source = config.ip4Addresses(nif).iterator().next();
+            test(UNSPEC, nif, ip4Group, source);
             test(INET,   nif, ip4Group, source);
-            test(UNSPEC, nif, ip4Group, source);
+            if (IPSupport.hasIPv6() && canIPv6JoinIPv4Group) {
+                test(INET6,  nif, ip4Group, source);
+            }
         }
 
-        for (NetworkInterface nif: config.ip6MulticastInterfaces()
-                                         .collect(Collectors.toList())) {
+        for (NetworkInterface nif : ip6MulticastInterfaces) {
             InetAddress source = config.ip6Addresses(nif).iterator().next();
+            test(UNSPEC, nif, ip6Group, source);
             test(INET6,  nif, ip6Group, source);
-            test(UNSPEC, nif, ip6Group, source);
         }
     }
 }
--- a/test/jdk/java/nio/channels/DatagramChannel/SocketOptionTests.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/nio/channels/DatagramChannel/SocketOptionTests.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,10 +22,15 @@
  */
 
 /* @test
- * @bug 4640544 8044773
+ * @bug 4640544 8044773 8233435
  * @summary Unit test for setOption/getOption/options methods
  * @requires !vm.graal.enabled
+ * @library /test/lib
+ * @build jdk.test.lib.net.IPSupport
+ *        jdk.test.lib.NetworkConfiguration
+ *        SocketOptionTests
  * @run main SocketOptionTests
+ * @run main/othervm -Djava.net.preferIPv4Stack=true SocketOptionTests
  * @run main/othervm --limit-modules=java.base SocketOptionTests
  */
 
@@ -34,23 +39,61 @@
 import java.net.*;
 import java.io.IOException;
 import java.util.*;
+import static java.net.StandardProtocolFamily.*;
 import static java.net.StandardSocketOptions.*;
 
+import jdk.test.lib.NetworkConfiguration;
+import jdk.test.lib.net.IPSupport;
+
 public class SocketOptionTests {
 
-    static <T> void checkOption(DatagramChannel dc,
-                                SocketOption<T> name,
-                                T expectedValue)
-        throws IOException
-    {
-        T value = dc.getOption(name);
-        if (!value.equals(expectedValue))
-            throw new RuntimeException("value not as expected");
+    public static void main(String[] args) throws IOException {
+        IPSupport.throwSkippedExceptionIfNonOperational();
+
+        NetworkConfiguration config = NetworkConfiguration.probe();
+        InetAddress ip4Address = config.ip4Addresses().findAny().orElse(null);
+        InetAddress ip6Address = config.ip6Addresses().findAny().orElse(null);
+
+        System.out.println("[UNSPEC, bound to wildcard address]");
+        try (DatagramChannel dc = DatagramChannel.open()) {
+            test(dc, new InetSocketAddress(0));
+        }
+
+        if (IPSupport.hasIPv4()) {
+            System.out.println("[INET, bound to wildcard address]");
+            try (DatagramChannel dc = DatagramChannel.open(INET)) {
+                test(dc, new InetSocketAddress(0));
+            }
+            System.out.println("[INET, bound to IPv4 address]");
+            try (DatagramChannel dc = DatagramChannel.open(INET)) {
+                test(dc, new InetSocketAddress(ip4Address, 0));
+            }
+        }
+
+        if (IPSupport.hasIPv6()) {
+            System.out.println("[INET6, bound to wildcard address]");
+            try (DatagramChannel dc = DatagramChannel.open(INET6)) {
+                test(dc, new InetSocketAddress(0));
+            }
+            System.out.println("[INET6, bound to IPv6 address]");
+            try (DatagramChannel dc = DatagramChannel.open(INET6)) {
+                test(dc, new InetSocketAddress(ip6Address, 0));
+            }
+        }
+
+        if (IPSupport.hasIPv4() && IPSupport.hasIPv6()) {
+            System.out.println("[UNSPEC, bound to IPv4 address]");
+            try (DatagramChannel dc = DatagramChannel.open()) {
+                test(dc, new InetSocketAddress(ip4Address, 0));
+            }
+            System.out.println("[INET6, bound to IPv4 address]");
+            try (DatagramChannel dc = DatagramChannel.open(INET6)) {
+                test(dc, new InetSocketAddress(ip4Address, 0));
+            }
+        }
     }
 
-    public static void main(String[] args) throws IOException {
-        DatagramChannel dc = DatagramChannel.open();
-
+    static void test(DatagramChannel dc, SocketAddress localAddress) throws IOException {
         // check supported options
         Set<SocketOption<?>> options = dc.supportedOptions();
         boolean reuseport = options.contains(SO_REUSEPORT);
@@ -101,7 +144,7 @@
             checkOption(dc, SO_REUSEPORT, false);
         }
         // bind socket
-        dc.bind(new InetSocketAddress(0));
+        dc.bind(localAddress);
 
         // allow to change when bound
         dc.setOption(SO_BROADCAST, true);
@@ -116,7 +159,6 @@
         dc.setOption(IP_MULTICAST_LOOP, true);
         checkOption(dc, IP_MULTICAST_LOOP, true);
 
-
         // NullPointerException
         try {
             dc.setOption(null, "value");
@@ -137,4 +179,14 @@
         } catch (ClosedChannelException x) {
         }
     }
+
+    static <T> void checkOption(DatagramChannel dc,
+                                SocketOption<T> name,
+                                T expectedValue)
+        throws IOException
+    {
+        T value = dc.getOption(name);
+        if (!value.equals(expectedValue))
+            throw new RuntimeException("value not as expected");
+    }
 }
--- a/test/jdk/java/nio/channels/Selector/SelectWithConsumer.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/nio/channels/Selector/SelectWithConsumer.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -197,19 +197,21 @@
             // write to sink to ensure that the source is readable
             sink.write(messageBuffer());
 
+            // wait for key1 to be readable
+            sel.select();
+            assertTrue(key2.isWritable());
+            while (!key1.isReadable()) {
+                Thread.sleep(20);
+                sel.select();
+            }
+
             var counter = new AtomicInteger();
 
             // select(Consumer)
             counter.set(0);
             int n = sel.select(k -> {
+                assertTrue(k == key1 || k == key2);
                 counter.incrementAndGet();
-                if (k == key1) {
-                    assertTrue(k.isReadable());
-                } else if (k == key2) {
-                    assertTrue(k.isWritable());
-                } else {
-                    assertTrue(false);
-                }
             });
             assertTrue(n == 2);
             assertTrue(counter.get() == 2);
@@ -217,14 +219,8 @@
             // select(Consumer, timeout)
             counter.set(0);
             n = sel.select(k -> {
+                assertTrue(k == key1 || k == key2);
                 counter.incrementAndGet();
-                if (k == key1) {
-                    assertTrue(k.isReadable());
-                } else if (k == key2) {
-                    assertTrue(k.isWritable());
-                } else {
-                    assertTrue(false);
-                }
             }, 1000);
             assertTrue(n == 2);
             assertTrue(counter.get() == 2);
@@ -232,14 +228,8 @@
             // selectNow(Consumer)
             counter.set(0);
             n = sel.selectNow(k -> {
+                assertTrue(k == key1 || k == key2);
                 counter.incrementAndGet();
-                if (k == key1) {
-                    assertTrue(k.isReadable());
-                } else if (k == key2) {
-                    assertTrue(k.isWritable());
-                } else {
-                    assertTrue(false);
-                }
             });
             assertTrue(n == 2);
             assertTrue(counter.get() == 2);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/nio/channels/Selector/StackOverflowTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2019, Red Hat, Inc. and/or its affiliates.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8216472
+ * @summary The native call in WindowsSelectorImpl.SubSelector.poll can use
+ *     more stack space than is available in the shadow zone; this can cause
+ *     a crash if the selector is called from a deeply recursive Java call
+ * @requires (os.family == "windows")
+ */
+
+import java.nio.channels.Selector;
+
+public class StackOverflowTest {
+
+    public static void main(String[] args) throws Exception {
+        try (var sel = Selector.open()) {
+            recursiveSelect(sel);
+        } catch (StackOverflowError e) {
+            // ignore SOE from java calls
+        }
+    }
+
+    static void recursiveSelect(Selector sel) throws Exception {
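+        // no base case: selectNow() runs at every recursion depth until the
+        // stack overflows, exercising the native poll near stack exhaustion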
+        sel.selectNow();
+        recursiveSelect(sel);
+    }
+}
--- a/test/jdk/java/util/Arrays/ParallelSorting.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2067 +0,0 @@
-/*
- * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-/* Adapted from test/java/util/Arrays/Sorting.java
- *
- * Where that test checks Arrays.sort against manual quicksort routines,
- * this test checks parallelSort against either Arrays.sort or manual
- * quicksort routines.
- */
-
-/*
- * @test
- * @bug 8003981
- * @run main ParallelSorting -shortrun
- * @summary Exercise Arrays.parallelSort (adapted from test Sorting)
- *
- * @author Vladimir Yaroslavskiy
- * @author Jon Bentley
- * @author Josh Bloch
- */
-
-import java.util.Arrays;
-import java.util.Random;
-import java.io.PrintStream;
-import java.util.Comparator;
-
-public class ParallelSorting {
-    private static final PrintStream out = System.out;
-    private static final PrintStream err = System.err;
-
-    // Array lengths used in a long run (default)
-    private static final int[] LONG_RUN_LENGTHS = {
-        1000, 10000, 100000, 1000000 };
-
-    // Array lengths used in a short run
-    private static final int[] SHORT_RUN_LENGTHS = {
-        5000, 9000, 10000, 12000 };
-
-    // Random initial values used in a long run (default)
-    private static final long[] LONG_RUN_RANDOMS = { 666, 0xC0FFEE, 999 };
-
-    // Random initial values used in a short run
-    private static final long[] SHORT_RUN_RANDOMS = { 666 };
-
-    public static void main(String[] args) {
-        boolean shortRun = args.length > 0 && args[0].equals("-shortrun");
-        long start = System.currentTimeMillis();
-
-        if (shortRun) {
-            testAndCheck(SHORT_RUN_LENGTHS, SHORT_RUN_RANDOMS);
-        } else {
-            testAndCheck(LONG_RUN_LENGTHS, LONG_RUN_RANDOMS);
-        }
-        long end = System.currentTimeMillis();
-
-        out.format("PASSED in %d sec.\n", Math.round((end - start) / 1E3));
-    }
-
-    private static void testAndCheck(int[] lengths, long[] randoms) {
-        testEmptyAndNullIntArray();
-        testEmptyAndNullLongArray();
-        testEmptyAndNullShortArray();
-        testEmptyAndNullCharArray();
-        testEmptyAndNullByteArray();
-        testEmptyAndNullFloatArray();
-        testEmptyAndNullDoubleArray();
-
-        for (int length : lengths) {
-            testMergeSort(length);
-            testAndCheckRange(length);
-            testAndCheckSubArray(length);
-        }
-        for (long seed : randoms) {
-            for (int length : lengths) {
-                testAndCheckWithInsertionSort(length, new MyRandom(seed));
-                testAndCheckWithCheckSum(length, new MyRandom(seed));
-                testAndCheckWithScrambling(length, new MyRandom(seed));
-                testAndCheckFloat(length, new MyRandom(seed));
-                testAndCheckDouble(length, new MyRandom(seed));
-                testStable(length, new MyRandom(seed));
-            }
-        }
-    }
-
-    private static void testEmptyAndNullIntArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.parallelSort(new int[]{});
-        Arrays.parallelSort(new int[]{}, 0, 0);
-
-        try {
-            Arrays.parallelSort((int[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.parallelSort((int[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.parallelSort(int[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.parallelSort(int[]) shouldn't catch null array");
-    }
-
-    private static void testEmptyAndNullLongArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.parallelSort(new long[]{});
-        Arrays.parallelSort(new long[]{}, 0, 0);
-
-        try {
-            Arrays.parallelSort((long[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.parallelSort((long[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.parallelSort(long[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.parallelSort(long[]) shouldn't catch null array");
-    }
-
-    private static void testEmptyAndNullShortArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.parallelSort(new short[]{});
-        Arrays.parallelSort(new short[]{}, 0, 0);
-
-        try {
-            Arrays.parallelSort((short[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.parallelSort((short[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.parallelSort(short[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.parallelSort(short[]) shouldn't catch null array");
-    }
-
-    private static void testEmptyAndNullCharArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.parallelSort(new char[]{});
-        Arrays.parallelSort(new char[]{}, 0, 0);
-
-        try {
-            Arrays.parallelSort((char[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.parallelSort((char[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.parallelSort(char[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.parallelSort(char[]) shouldn't catch null array");
-    }
-
-    private static void testEmptyAndNullByteArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.parallelSort(new byte[]{});
-        Arrays.parallelSort(new byte[]{}, 0, 0);
-
-        try {
-            Arrays.parallelSort((byte[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.parallelSort((byte[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.parallelSort(byte[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.parallelSort(byte[]) shouldn't catch null array");
-    }
-
-    private static void testEmptyAndNullFloatArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.parallelSort(new float[]{});
-        Arrays.parallelSort(new float[]{}, 0, 0);
-
-        try {
-            Arrays.parallelSort((float[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.parallelSort((float[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.parallelSort(float[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.parallelSort(float[]) shouldn't catch null array");
-    }
-
-    private static void testEmptyAndNullDoubleArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.parallelSort(new double[]{});
-        Arrays.parallelSort(new double[]{}, 0, 0);
-
-        try {
-            Arrays.parallelSort((double[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.parallelSort((double[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.parallelSort(double[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.parallelSort(double[]) shouldn't catch null array");
-    }
-
-    private static void testAndCheckSubArray(int length) {
-        ourDescription = "Check sorting of subarray";
-        int[] golden = new int[length];
-        boolean newLine = false;
-
-        for (int m = 1; m < length / 2; m *= 2) {
-            newLine = true;
-            int fromIndex = m;
-            int toIndex = length - m;
-
-            prepareSubArray(golden, fromIndex, toIndex, m);
-            int[] test = golden.clone();
-
-            for (TypeConverter converter : TypeConverter.values()) {
-                out.println("Test 'subarray': " + converter +
-                   " length = " + length + ", m = " + m);
-                Object convertedGolden = converter.convert(golden);
-                Object convertedTest = converter.convert(test);
-                sortSubArray(convertedTest, fromIndex, toIndex);
-                checkSubArray(convertedTest, fromIndex, toIndex, m);
-            }
-        }
-        if (newLine) {
-            out.println();
-        }
-    }
-
-    private static void testAndCheckRange(int length) {
-        ourDescription = "Check range check";
-        int[] golden = new int[length];
-
-        for (int m = 1; m < 2 * length; m *= 2) {
-            for (int i = 1; i <= length; i++) {
-                golden[i - 1] = i % m + m % i;
-            }
-            for (TypeConverter converter : TypeConverter.values()) {
-                out.println("Test 'range': " + converter +
-                   ", length = " + length + ", m = " + m);
-                Object convertedGolden = converter.convert(golden);
-                checkRange(convertedGolden, m);
-            }
-        }
-        out.println();
-    }
-
-    private static void testStable(int length, MyRandom random) {
-        ourDescription = "Check if sorting is stable";
-        Pair[] a = build(length, random);
-
-        out.println("Test 'stable': " + "random = " + random.getSeed() +
-            ", length = " + length);
-        Arrays.parallelSort(a);
-        checkSorted(a);
-        checkStable(a);
-        out.println();
-
-        a = build(length, random);
-
-        out.println("Test 'stable' comparator: " + "random = " + random.getSeed() +
-            ", length = " + length);
-        Arrays.parallelSort(a, pairCmp);
-        checkSorted(a);
-        checkStable(a);
-        out.println();
-
-    }
-
-    private static void checkSorted(Pair[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i].getKey() > a[i + 1].getKey()) {
-                failedSort(i, "" + a[i].getKey(), "" + a[i + 1].getKey());
-            }
-        }
-    }
-
-    private static void checkStable(Pair[] a) {
-        for (int i = 0; i < a.length / 4; ) {
-            int key1 = a[i].getKey();
-            int value1 = a[i++].getValue();
-            int key2 = a[i].getKey();
-            int value2 = a[i++].getValue();
-            int key3 = a[i].getKey();
-            int value3 = a[i++].getValue();
-            int key4 = a[i].getKey();
-            int value4 = a[i++].getValue();
-
-            if (!(key1 == key2 && key2 == key3 && key3 == key4)) {
-                failed("On position " + i + " keys are different " +
-                    key1 + ", " + key2 + ", " + key3 + ", " + key4);
-            }
-            if (!(value1 < value2 && value2 < value3 && value3 < value4)) {
-                failed("Sorting is not stable at position " + i +
-                    ". Second values have been changed: " +  value1 + ", " +
-                    value2 + ", " + value3 + ", " + value4);
-            }
-        }
-    }
-
-    private static Pair[] build(int length, Random random) {
-        Pair[] a = new Pair[length * 4];
-
-        for (int i = 0; i < a.length; ) {
-            int key = random.nextInt();
-            a[i++] = new Pair(key, 1);
-            a[i++] = new Pair(key, 2);
-            a[i++] = new Pair(key, 3);
-            a[i++] = new Pair(key, 4);
-        }
-        return a;
-    }
-
-    private static Comparator<Pair> pairCmp = new Comparator<Pair>() {
-        public int compare(Pair p1, Pair p2) {
-            return p1.compareTo(p2);
-        }
-    };
-
-    private static final class Pair implements Comparable<Pair> {
-        Pair(int key, int value) {
-            myKey = key;
-            myValue = value;
-        }
-
-        int getKey() {
-            return myKey;
-        }
-
-        int getValue() {
-            return myValue;
-        }
-
-        public int compareTo(Pair pair) {
-            if (myKey < pair.myKey) {
-                return -1;
-            }
-            if (myKey > pair.myKey) {
-                return 1;
-            }
-            return 0;
-        }
-
-        @Override
-        public String toString() {
-            return "(" + myKey + ", " + myValue + ")";
-        }
-
-        private int myKey;
-        private int myValue;
-    }
-
-
-    private static void testAndCheckWithInsertionSort(int length, MyRandom random) {
-        if (length > 1000) {
-            return;
-        }
-        ourDescription = "Check sorting with insertion sort";
-        int[] golden = new int[length];
-
-        for (int m = 1; m < 2 * length; m *= 2) {
-            for (UnsortedBuilder builder : UnsortedBuilder.values()) {
-                builder.build(golden, m, random);
-                int[] test = golden.clone();
-
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'insertion sort': " + converter +
-                        " " + builder + "random = " + random.getSeed() +
-                        ", length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    Object convertedTest1 = converter.convert(test);
-                    Object convertedTest2 = converter.convert(test);
-                    sort(convertedTest1);
-                    sortByInsertionSort(convertedTest2);
-                    compare(convertedTest1, convertedTest2);
-                }
-            }
-        }
-        out.println();
-    }
-
-    private static void testMergeSort(int length) {
-        if (length < 1000) {
-            return;
-        }
-        ourDescription = "Check merge sorting";
-        int[] golden = new int[length];
-        int period = 67; // java.util.DualPivotQuicksort.MAX_RUN_COUNT
-
-        for (int m = period - 2; m <= period + 2; m++) {
-            for (MergeBuilder builder : MergeBuilder.values()) {
-                builder.build(golden, m);
-                int[] test = golden.clone();
-
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'merge sort': " + converter + " " +
-                        builder + "length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    sort(convertedGolden);
-                    checkSorted(convertedGolden);
-                }
-            }
-        }
-        out.println();
-    }
-
-    private static void testAndCheckWithCheckSum(int length, MyRandom random) {
-        ourDescription = "Check sorting with check sum";
-        int[] golden = new int[length];
-
-        for (int m = 1; m < 2 * length; m *= 2) {
-            for (UnsortedBuilder builder : UnsortedBuilder.values()) {
-                builder.build(golden, m, random);
-                int[] test = golden.clone();
-
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'check sum': " + converter +
-                        " " + builder + "random = " + random.getSeed() +
-                        ", length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    Object convertedTest = converter.convert(test);
-                    sort(convertedTest);
-                    checkWithCheckSum(convertedTest, convertedGolden);
-                }
-            }
-        }
-        out.println();
-    }
-
-    private static void testAndCheckWithScrambling(int length, MyRandom random) {
-        ourDescription = "Check sorting with scrambling";
-        int[] golden = new int[length];
-
-        for (int m = 1; m <= 7; m++) {
-            if (m > length) {
-                break;
-            }
-            for (SortedBuilder builder : SortedBuilder.values()) {
-                builder.build(golden, m);
-                int[] test = golden.clone();
-                scramble(test, random);
-
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'scrambling': " + converter +
-                       " " + builder + "random = " + random.getSeed() +
-                       ", length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    Object convertedTest = converter.convert(test);
-                    sort(convertedTest);
-                    compare(convertedTest, convertedGolden);
-                }
-            }
-        }
-        out.println();
-    }
-
-    private static void testAndCheckFloat(int length, MyRandom random) {
-        ourDescription = "Check float sorting";
-        float[] golden = new float[length];
-        final int MAX = 10;
-        boolean newLine = false;
-
-        for (int a = 0; a <= MAX; a++) {
-            for (int g = 0; g <= MAX; g++) {
-                for (int z = 0; z <= MAX; z++) {
-                    for (int n = 0; n <= MAX; n++) {
-                        for (int p = 0; p <= MAX; p++) {
-                            if (a + g + z + n + p > length) {
-                                continue;
-                            }
-                            if (a + g + z + n + p < length) {
-                                continue;
-                            }
-                            for (FloatBuilder builder : FloatBuilder.values()) {
-                                out.println("Test 'float': random = " + random.getSeed() +
-                                   ", length = " + length + ", a = " + a + ", g = " +
-                                   g + ", z = " + z + ", n = " + n + ", p = " + p);
-                                builder.build(golden, a, g, z, n, p, random);
-                                float[] test = golden.clone();
-                                scramble(test, random);
-                                sort(test);
-                                compare(test, golden, a, n, g);
-                            }
-                            newLine = true;
-                        }
-                    }
-                }
-            }
-        }
-        if (newLine) {
-            out.println();
-        }
-    }
-
-    private static void testAndCheckDouble(int length, MyRandom random) {
-        ourDescription = "Check double sorting";
-        double[] golden = new double[length];
-        final int MAX = 10;
-        boolean newLine = false;
-
-        for (int a = 0; a <= MAX; a++) {
-            for (int g = 0; g <= MAX; g++) {
-                for (int z = 0; z <= MAX; z++) {
-                    for (int n = 0; n <= MAX; n++) {
-                        for (int p = 0; p <= MAX; p++) {
-                            if (a + g + z + n + p > length) {
-                                continue;
-                            }
-                            if (a + g + z + n + p < length) {
-                                continue;
-                            }
-                            for (DoubleBuilder builder : DoubleBuilder.values()) {
-                                out.println("Test 'double': random = " + random.getSeed() +
-                                   ", length = " + length + ", a = " + a + ", g = " +
-                                   g + ", z = " + z + ", n = " + n + ", p = " + p);
-                                builder.build(golden, a, g, z, n, p, random);
-                                double[] test = golden.clone();
-                                scramble(test, random);
-                                sort(test);
-                                compare(test, golden, a, n, g);
-                            }
-                            newLine = true;
-                        }
-                    }
-                }
-            }
-        }
-        if (newLine) {
-            out.println();
-        }
-    }
-
-    private static void prepareSubArray(int[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            a[i] = 0xDEDA;
-        }
-        int middle = (fromIndex + toIndex) >>> 1;
-        int k = 0;
-
-        for (int i = fromIndex; i < middle; i++) {
-            a[i] = k++;
-        }
-        for (int i = middle; i < toIndex; i++) {
-            a[i] = k--;
-        }
-        for (int i = toIndex; i < a.length; i++) {
-            a[i] = 0xBABA;
-        }
-    }
-
-    private static void scramble(int[] a, Random random) {
-        for (int i = 0; i < a.length * 7; i++) {
-            swap(a, random.nextInt(a.length), random.nextInt(a.length));
-        }
-    }
-
-    private static void scramble(float[] a, Random random) {
-        for (int i = 0; i < a.length * 7; i++) {
-            swap(a, random.nextInt(a.length), random.nextInt(a.length));
-        }
-    }
-
-    private static void scramble(double[] a, Random random) {
-        for (int i = 0; i < a.length * 7; i++) {
-            swap(a, random.nextInt(a.length), random.nextInt(a.length));
-        }
-    }
-
-    private static void swap(int[] a, int i, int j) {
-        int t = a[i];
-        a[i] = a[j];
-        a[j] = t;
-    }
-
-    private static void swap(float[] a, int i, int j) {
-        float t = a[i];
-        a[i] = a[j];
-        a[j] = t;
-    }
-
-    private static void swap(double[] a, int i, int j) {
-        double t = a[i];
-        a[i] = a[j];
-        a[j] = t;
-    }
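
scramble shuffles the golden copy with 7 * n random transpositions before each sort; because the Random is seeded and the seed is printed with every test case, a failing input can be regenerated exactly. A self-contained sketch of the recipe (seed and class name are illustrative):

import java.util.Arrays;
import java.util.Random;

public class ScrambleDemo {

    public static void main(String[] args) {
        int[] a = {0, 1, 2, 3, 4, 5, 6, 7};
        Random random = new Random(0xC0FFEE);    // fixed seed => reproducible run

        // Same recipe as the test: 7 * n random transpositions.
        for (int i = 0; i < a.length * 7; i++) {
            int j = random.nextInt(a.length);
            int k = random.nextInt(a.length);
            int t = a[j]; a[j] = a[k]; a[k] = t;
        }
        System.out.println(Arrays.toString(a));  // a permutation of 0..7
    }
}
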
-
-    private static enum TypeConverter {
-        INT {
-            Object convert(int[] a) {
-                return a.clone();
-            }
-        },
-        LONG {
-            Object convert(int[] a) {
-                long[] b = new long[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (long) a[i];
-                }
-                return b;
-            }
-        },
-        BYTE {
-            Object convert(int[] a) {
-                byte[] b = new byte[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (byte) a[i];
-                }
-                return b;
-            }
-        },
-        SHORT {
-            Object convert(int[] a) {
-                short[] b = new short[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (short) a[i];
-                }
-                return b;
-            }
-        },
-        CHAR {
-            Object convert(int[] a) {
-                char[] b = new char[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (char) a[i];
-                }
-                return b;
-            }
-        },
-        FLOAT {
-            Object convert(int[] a) {
-                float[] b = new float[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (float) a[i];
-                }
-                return b;
-            }
-        },
-        DOUBLE {
-            Object convert(int[] a) {
-                double[] b = new double[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (double) a[i];
-                }
-                return b;
-            }
-        },
-        INTEGER {
-            Object convert(int[] a) {
-                Integer[] b = new Integer[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = Integer.valueOf(a[i]);
-                }
-                return b;
-            }
-        };
-
-        abstract Object convert(int[] a);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 9; i++) {
-                name += " ";
-            }
-            return name;
-        }
-    }
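
TypeConverter fans a single golden int[] out to every element type under test. The narrowing casts (int to byte, short, char, float) are lossy, but they are deterministic and are applied to test and golden data alike, so they cannot produce false mismatches. A standalone sketch of one such narrowing (names are illustrative):

import java.util.Arrays;

public class ConvertDemo {

    public static void main(String[] args) {
        int[] golden = {70000, -3, 259};

        short[] asShort = new short[golden.length];
        for (int i = 0; i < golden.length; i++) {
            asShort[i] = (short) golden[i];      // 70000 narrows to 4464
        }
        // The same cast on a scrambled copy yields the same values,
        // so element-by-element comparison after sorting stays valid.
        System.out.println(Arrays.toString(asShort));   // [4464, -3, 259]
    }
}
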
-
-    private static enum FloatBuilder {
-        SIMPLE {
-            void build(float[] x, int a, int g, int z, int n, int p, Random random) {
-                int fromIndex = 0;
-                float negativeValue = -random.nextFloat();
-                float positiveValue =  random.nextFloat();
-
-                writeValue(x, negativeValue, fromIndex, n);
-                fromIndex += n;
-
-                writeValue(x, -0.0f, fromIndex, g);
-                fromIndex += g;
-
-                writeValue(x, 0.0f, fromIndex, z);
-                fromIndex += z;
-
-                writeValue(x, positiveValue, fromIndex, p);
-                fromIndex += p;
-
-                writeValue(x, Float.NaN, fromIndex, a);
-            }
-        };
-
-        abstract void build(float[] x, int a, int g, int z, int n, int p, Random random);
-    }
-
-    private static enum DoubleBuilder {
-        SIMPLE {
-            void build(double[] x, int a, int g, int z, int n, int p, Random random) {
-                int fromIndex = 0;
-                double negativeValue = -random.nextFloat();
-                double positiveValue =  random.nextFloat();
-
-                writeValue(x, negativeValue, fromIndex, n);
-                fromIndex += n;
-
-                writeValue(x, -0.0d, fromIndex, g);
-                fromIndex += g;
-
-                writeValue(x, 0.0d, fromIndex, z);
-                fromIndex += z;
-
-                writeValue(x, positiveValue, fromIndex, p);
-                fromIndex += p;
-
-                writeValue(x, Double.NaN, fromIndex, a);
-            }
-        };
-
-        abstract void build(double[] x, int a, int g, int z, int n, int p, Random random);
-    }
-
-    private static void writeValue(float[] a, float value, int fromIndex, int count) {
-        for (int i = fromIndex; i < fromIndex + count; i++) {
-            a[i] = value;
-        }
-    }
-
-    private static void compare(float[] a, float[] b, int numNaN, int numNeg, int numNegZero) {
-        for (int i = a.length - numNaN; i < a.length; i++) {
-            if (a[i] == a[i]) {
-                failed("On position " + i + " must be NaN instead of " + a[i]);
-            }
-        }
-        final int NEGATIVE_ZERO = Float.floatToIntBits(-0.0f);
-
-        for (int i = numNeg; i < numNeg + numNegZero; i++) {
-            if (NEGATIVE_ZERO != Float.floatToIntBits(a[i])) {
-                failed("On position " + i + " must be -0.0 instead of " + a[i]);
-            }
-        }
-        for (int i = 0; i < a.length - numNaN; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void writeValue(double[] a, double value, int fromIndex, int count) {
-        for (int i = fromIndex; i < fromIndex + count; i++) {
-            a[i] = value;
-        }
-    }
-
-    private static void compare(double[] a, double[] b, int numNaN, int numNeg, int numNegZero) {
-        for (int i = a.length - numNaN; i < a.length; i++) {
-            if (a[i] == a[i]) {
-                failed("On position " + i + " must be NaN instead of " + a[i]);
-            }
-        }
-        final long NEGATIVE_ZERO = Double.doubleToLongBits(-0.0d);
-
-        for (int i = numNeg; i < numNeg + numNegZero; i++) {
-            if (NEGATIVE_ZERO != Double.doubleToLongBits(a[i])) {
-                failed("On position " + i + " must be -0.0 instead of " + a[i]);
-            }
-        }
-        for (int i = 0; i < a.length - numNaN; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
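
The two compare overloads above encode the documented ordering for floating-point sorts: all NaNs are gathered at the tail, and the block of negative zeros sits immediately after the ordinary negatives, distinguishable from positive zero only by bit pattern. A small runnable illustration of that contract:

import java.util.Arrays;

public class FloatOrderDemo {

    public static void main(String[] args) {
        double[] a = { 0.0d, Double.NaN, -0.0d, -1.5d, 2.5d };
        Arrays.sort(a);
        // Sorted order: -1.5, -0.0, 0.0, 2.5, NaN
        System.out.println(Arrays.toString(a));

        // -0.0 == 0.0 numerically; only the bit patterns differ,
        // which is why the test compares via doubleToLongBits.
        System.out.println(Double.doubleToLongBits(-0.0d) ==
                           Double.doubleToLongBits(0.0d));    // false
    }
}
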
-
-    private static enum SortedBuilder {
-        REPEATED {
-            void build(int[] a, int m) {
-                int period = a.length / m;
-                int i = 0;
-                int k = 0;
-
-                while (true) {
-                    for (int t = 1; t <= period; t++) {
-                        if (i >= a.length) {
-                            return;
-                        }
-                        a[i++] = k;
-                    }
-                    if (i >= a.length) {
-                        return;
-                    }
-                    k++;
-                }
-            }
-        },
-        ORGAN_PIPES {
-            void build(int[] a, int m) {
-                int i = 0;
-                int k = m;
-
-                while (true) {
-                    for (int t = 1; t <= m; t++) {
-                        if (i >= a.length) {
-                            return;
-                        }
-                        a[i++] = k;
-                    }
-                }
-            }
-        };
-
-        abstract void build(int[] a, int m);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 12; i++) {
-                name += " ";
-            }
-            return name;
-        }
-    }
-
-    private static enum MergeBuilder {
-        ASCENDING {
-            void build(int[] a, int m) {
-                int period = a.length / m;
-                int v = 1, i = 0;
-
-                for (int k = 0; k < m; k++) {
-                    v = 1;
-                    for (int p = 0; p < period; p++) {
-                        a[i++] = v++;
-                    }
-                }
-                for (int j = i; j < a.length - 1; j++) {
-                    a[j] = v++;
-                }
-                a[a.length - 1] = 0;
-            }
-        },
-        DESCENDING {
-            void build(int[] a, int m) {
-                int period = a.length / m;
-                int v = -1, i = 0;
-
-                for (int k = 0; k < m; k++) {
-                    v = -1;
-                    for (int p = 0; p < period; p++) {
-                        a[i++] = v--;
-                    }
-                }
-                for (int j = i; j < a.length - 1; j++) {
-                    a[j] = v--;
-                }
-                a[a.length - 1] = 0;
-            }
-        };
-
-        abstract void build(int[] a, int m);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 12; i++) {
-                name += " ";
-            }
-            return name;
-        }
-    }
-
-    private static enum UnsortedBuilder {
-        RANDOM {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = random.nextInt();
-                }
-            }
-        },
-        ASCENDING {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = m + i;
-                }
-            }
-        },
-        DESCENDING {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = a.length - m - i;
-                }
-            }
-        },
-        ALL_EQUAL {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = m;
-                }
-            }
-        },
-        SAW {
-            void build(int[] a, int m, Random random) {
-                int incCount = 1;
-                int decCount = a.length;
-                int i = 0;
-                int period = m--;
-
-                while (true) {
-                    for (int k = 1; k <= period; k++) {
-                        if (i >= a.length) {
-                            return;
-                        }
-                        a[i++] = incCount++;
-                    }
-                    period += m;
-
-                    for (int k = 1; k <= period; k++) {
-                        if (i >= a.length) {
-                            return;
-                        }
-                        a[i++] = decCount--;
-                    }
-                    period += m;
-                }
-            }
-        },
-        REPEATED {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = i % m;
-                }
-            }
-        },
-        DUPLICATED {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = random.nextInt(m);
-                }
-            }
-        },
-        ORGAN_PIPES {
-            void build(int[] a, int m, Random random) {
-                int middle = a.length / (m + 1);
-
-                for (int i = 0; i < middle; i++) {
-                    a[i] = i;
-                }
-                for (int i = middle; i < a.length; i++) {
-                    a[i] = a.length - i - 1;
-                }
-            }
-        },
-        STAGGER {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = (i * m + i) % a.length;
-                }
-            }
-        },
-        PLATEAU {
-            void build(int[] a, int m, Random random) {
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = Math.min(i, m);
-                }
-            }
-        },
-        SHUFFLE {
-            void build(int[] a, int m, Random random) {
-                int x = 0, y = 0;
-                for (int i = 0; i < a.length; i++) {
-                    a[i] = random.nextBoolean() ? (x += 2) : (y += 2);
-                }
-            }
-        };
-
-        abstract void build(int[] a, int m, Random random);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 12; i++) {
-                name += " ";
-            }
-            return name;
-        }
-    }
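
Each UnsortedBuilder constant produces an input shape that stresses a different code path of the sort: pre-sorted runs, plateaus, staggered interleavings, organ pipes, few distinct values, and so on. A standalone sketch generating two of the shapes with the same formulas as above (names are illustrative):

import java.util.Arrays;

public class ShapeDemo {

    public static void main(String[] args) {
        int n = 12, m = 3;

        // The STAGGER shape: (i * m + i) % n, interleaved ascending runs.
        int[] stagger = new int[n];
        for (int i = 0; i < n; i++) {
            stagger[i] = (i * m + i) % n;
        }
        System.out.println(Arrays.toString(stagger));

        // The PLATEAU shape: a ramp that flattens once it reaches m.
        int[] plateau = new int[n];
        for (int i = 0; i < n; i++) {
            plateau[i] = Math.min(i, m);
        }
        System.out.println(Arrays.toString(plateau));
    }
}
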
-
-    private static void checkWithCheckSum(Object test, Object golden) {
-        checkSorted(test);
-        checkCheckSum(test, golden);
-    }
-
-    private static void failed(String message) {
-        err.format("\n*** TEST FAILED - %s.\n\n%s.\n\n", ourDescription, message);
-        throw new RuntimeException("Test failed - see log file for details");
-    }
-
-    private static void failedSort(int index, String value1, String value2) {
-        failed("Array is not sorted at " + index + "-th position: " +
-            value1 + " and " + value2);
-    }
-
-    private static void failedCompare(int index, String value1, String value2) {
-        failed("On position " + index + " must be " + value2 + " instead of " + value1);
-    }
-
-    private static void compare(Object test, Object golden) {
-        if (test instanceof int[]) {
-            compare((int[]) test, (int[]) golden);
-        } else if (test instanceof long[]) {
-            compare((long[]) test, (long[]) golden);
-        } else if (test instanceof short[]) {
-            compare((short[]) test, (short[]) golden);
-        } else if (test instanceof byte[]) {
-            compare((byte[]) test, (byte[]) golden);
-        } else if (test instanceof char[]) {
-            compare((char[]) test, (char[]) golden);
-        } else if (test instanceof float[]) {
-            compare((float[]) test, (float[]) golden);
-        } else if (test instanceof double[]) {
-            compare((double[]) test, (double[]) golden);
-        } else if (test instanceof Integer[]) {
-            compare((Integer[]) test, (Integer[]) golden);
-        } else {
-            failed("Unknow type of array: " + test + " of class " +
-                test.getClass().getName());
-        }
-    }
-
-    private static void compare(int[] a, int[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(long[] a, long[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(short[] a, short[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(byte[] a, byte[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(char[] a, char[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(float[] a, float[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(double[] a, double[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(Integer[] a, Integer[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i].compareTo(b[i]) != 0) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void checkSorted(Object object) {
-        if (object instanceof int[]) {
-            checkSorted((int[]) object);
-        } else if (object instanceof long[]) {
-            checkSorted((long[]) object);
-        } else if (object instanceof short[]) {
-            checkSorted((short[]) object);
-        } else if (object instanceof byte[]) {
-            checkSorted((byte[]) object);
-        } else if (object instanceof char[]) {
-            checkSorted((char[]) object);
-        } else if (object instanceof float[]) {
-            checkSorted((float[]) object);
-        } else if (object instanceof double[]) {
-            checkSorted((double[]) object);
-        } else if (object instanceof Integer[]) {
-            checkSorted((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
-
-    private static void checkSorted(int[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(long[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(short[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(byte[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(char[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(float[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(double[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(Integer[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i].intValue() > a[i + 1].intValue()) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkCheckSum(Object test, Object golden) {
-        if (checkSumXor(test) != checkSumXor(golden)) {
-            failed("Original and sorted arrays are not identical [xor]");
-        }
-        if (checkSumPlus(test) != checkSumPlus(golden)) {
-            failed("Original and sorted arrays are not identical [plus]");
-        }
-    }
-
-    private static int checkSumXor(Object object) {
-        if (object instanceof int[]) {
-            return checkSumXor((int[]) object);
-        } else if (object instanceof long[]) {
-            return checkSumXor((long[]) object);
-        } else if (object instanceof short[]) {
-            return checkSumXor((short[]) object);
-        } else if (object instanceof byte[]) {
-            return checkSumXor((byte[]) object);
-        } else if (object instanceof char[]) {
-            return checkSumXor((char[]) object);
-        } else if (object instanceof float[]) {
-            return checkSumXor((float[]) object);
-        } else if (object instanceof double[]) {
-            return checkSumXor((double[]) object);
-        } else if (object instanceof Integer[]) {
-            return checkSumXor((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-            return -1;
-        }
-    }
-
-    private static int checkSumXor(Integer[] a) {
-        int checkSum = 0;
-
-        for (Integer e : a) {
-            checkSum ^= e.intValue();
-        }
-        return checkSum;
-    }
-
-    private static int checkSumXor(int[] a) {
-        int checkSum = 0;
-
-        for (int e : a) {
-            checkSum ^= e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumXor(long[] a) {
-        long checkSum = 0;
-
-        for (long e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(short[] a) {
-        short checkSum = 0;
-
-        for (short e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(byte[] a) {
-        byte checkSum = 0;
-
-        for (byte e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(char[] a) {
-        char checkSum = 0;
-
-        for (char e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(float[] a) {
-        int checkSum = 0;
-
-        for (float e : a) {
-            checkSum ^= (int) e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumXor(double[] a) {
-        int checkSum = 0;
-
-        for (double e : a) {
-            checkSum ^= (int) e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumPlus(Object object) {
-        if (object instanceof int[]) {
-            return checkSumPlus((int[]) object);
-        } else if (object instanceof long[]) {
-            return checkSumPlus((long[]) object);
-        } else if (object instanceof short[]) {
-            return checkSumPlus((short[]) object);
-        } else if (object instanceof byte[]) {
-            return checkSumPlus((byte[]) object);
-        } else if (object instanceof char[]) {
-            return checkSumPlus((char[]) object);
-        } else if (object instanceof float[]) {
-            return checkSumPlus((float[]) object);
-        } else if (object instanceof double[]) {
-            return checkSumPlus((double[]) object);
-        } else if (object instanceof Integer[]) {
-            return checkSumPlus((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-            return -1;
-        }
-    }
-
-    private static int checkSumPlus(int[] a) {
-        int checkSum = 0;
-
-        for (int e : a) {
-            checkSum += e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumPlus(long[] a) {
-        long checkSum = 0;
-
-        for (long e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumPlus(short[] a) {
-        short checkSum = 0;
-
-        for (short e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumPlus(byte[] a) {
-        byte checkSum = 0;
-
-        for (byte e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumPlus(char[] a) {
-        char checkSum = 0;
-
-        for (char e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumPlus(float[] a) {
-        int checkSum = 0;
-
-        for (float e : a) {
-            checkSum += (int) e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumPlus(double[] a) {
-        int checkSum = 0;
-
-        for (double e : a) {
-            checkSum += (int) e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumPlus(Integer[] a) {
-        int checkSum = 0;
-
-        for (Integer e : a) {
-            checkSum += e.intValue();
-        }
-        return checkSum;
-    }
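
A correct sort can only permute its input, so the element multiset must be preserved; checkCheckSum verifies two cheap necessary conditions, an XOR fold and an additive fold. Neither alone is sufficient, which is why both are checked, as the standalone counterexamples below show (neither corruption fools both checksums at once):

public class ChecksumDemo {

    public static void main(String[] args) {
        // Corruption that the XOR checksum misses but the sum catches:
        // {1, 2, 3} -> {5, 5, 0}: both XOR to 0, but 6 != 10.
        System.out.println((1 ^ 2 ^ 3) == (5 ^ 5 ^ 0));   // true  (xor fooled)
        System.out.println((1 + 2 + 3) == (5 + 5 + 0));   // false (sum catches it)

        // Corruption that the sum misses but XOR catches:
        // {1, 3} -> {2, 2}: both sum to 4, but 1^3 = 2 and 2^2 = 0.
        System.out.println((1 + 3) == (2 + 2));           // true  (sum fooled)
        System.out.println((1 ^ 3) == (2 ^ 2));           // false (xor catches it)
    }
}
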
-
-    private static void sortByInsertionSort(Object object) {
-        if (object instanceof int[]) {
-            sortByInsertionSort((int[]) object);
-        } else if (object instanceof long[]) {
-            sortByInsertionSort((long[]) object);
-        } else if (object instanceof short[]) {
-            sortByInsertionSort((short[]) object);
-        } else if (object instanceof byte[]) {
-            sortByInsertionSort((byte[]) object);
-        } else if (object instanceof char[]) {
-            sortByInsertionSort((char[]) object);
-        } else if (object instanceof float[]) {
-            sortByInsertionSort((float[]) object);
-        } else if (object instanceof double[]) {
-            sortByInsertionSort((double[]) object);
-        } else if (object instanceof Integer[]) {
-            sortByInsertionSort((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
-
-    private static void sortByInsertionSort(int[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            int ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(long[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            long ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(short[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            short ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(byte[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            byte ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(char[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            char ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(float[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            float ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(double[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            double ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(Integer[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            Integer ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
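
These insertion-sort overloads give the test an independent oracle: insertion sort is quadratic but trivially correct, so sorting one copy with the implementation under test and one with insertion sort, then comparing, catches ordering bugs without trusting the code being tested. A compact standalone version of the idea (seed and names are illustrative):

import java.util.Arrays;
import java.util.Random;

public class OracleDemo {

    public static void main(String[] args) {
        Random random = new Random(42);           // illustrative seed
        int[] test = random.ints(100).toArray();
        int[] golden = test.clone();

        Arrays.sort(test);                        // implementation under test
        insertionSort(golden);                    // trusted reference

        System.out.println(Arrays.equals(test, golden));   // true
    }

    static void insertionSort(int[] a) {
        for (int i = 1; i < a.length; i++) {
            int ai = a[i];
            int j = i - 1;
            while (j >= 0 && ai < a[j]) {
                a[j + 1] = a[j];
                j--;
            }
            a[j + 1] = ai;
        }
    }
}
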
-
-    private static void sort(Object object) {
-        if (object instanceof int[]) {
-            Arrays.parallelSort((int[]) object);
-        } else if (object instanceof long[]) {
-            Arrays.parallelSort((long[]) object);
-        } else if (object instanceof short[]) {
-            Arrays.parallelSort((short[]) object);
-        } else if (object instanceof byte[]) {
-            Arrays.parallelSort((byte[]) object);
-        } else if (object instanceof char[]) {
-            Arrays.parallelSort((char[]) object);
-        } else if (object instanceof float[]) {
-            Arrays.parallelSort((float[]) object);
-        } else if (object instanceof double[]) {
-            Arrays.parallelSort((double[]) object);
-        } else if (object instanceof Integer[]) {
-            Arrays.parallelSort((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
-
-    private static void sortSubArray(Object object, int fromIndex, int toIndex) {
-        if (object instanceof int[]) {
-            Arrays.parallelSort((int[]) object, fromIndex, toIndex);
-        } else if (object instanceof long[]) {
-            Arrays.parallelSort((long[]) object, fromIndex, toIndex);
-        } else if (object instanceof short[]) {
-            Arrays.parallelSort((short[]) object, fromIndex, toIndex);
-        } else if (object instanceof byte[]) {
-            Arrays.parallelSort((byte[]) object, fromIndex, toIndex);
-        } else if (object instanceof char[]) {
-            Arrays.parallelSort((char[]) object, fromIndex, toIndex);
-        } else if (object instanceof float[]) {
-            Arrays.parallelSort((float[]) object, fromIndex, toIndex);
-        } else if (object instanceof double[]) {
-            Arrays.parallelSort((double[]) object, fromIndex, toIndex);
-        } else if (object instanceof Integer[]) {
-            Arrays.parallelSort((Integer[]) object, fromIndex, toIndex);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
-
-    private static void checkSubArray(Object object, int fromIndex, int toIndex, int m) {
-        if (object instanceof int[]) {
-            checkSubArray((int[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof long[]) {
-            checkSubArray((long[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof short[]) {
-            checkSubArray((short[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof byte[]) {
-            checkSubArray((byte[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof char[]) {
-            checkSubArray((char[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof float[]) {
-            checkSubArray((float[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof double[]) {
-            checkSubArray((double[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof Integer[]) {
-            checkSubArray((Integer[]) object, fromIndex, toIndex, m);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
-
-    private static void checkSubArray(Integer[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i].intValue() != 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i].intValue() > a[i + 1].intValue()) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i].intValue() != 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(int[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(byte[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (byte) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (byte) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(long[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (long) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (long) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(char[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (char) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (char) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(short[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (short) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (short) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(float[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (float) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (float) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(double[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (double) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (double) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
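
The checkSubArray overloads rely on a sentinel pattern: every cell left of fromIndex holds 0xDEDA, every cell right of toIndex holds 0xBABA, and a correct range sort must order the middle without disturbing either fence. A standalone sketch of the same check against the public API:

import java.util.Arrays;

public class SubArrayDemo {

    public static void main(String[] args) {
        int[] a = new int[10];
        int from = 2, to = 8;

        Arrays.fill(a, 0, from, 0xDEDA);          // left fence
        Arrays.fill(a, to, a.length, 0xBABA);     // right fence
        for (int i = from; i < to; i++) {
            a[i] = to - i;                        // descending payload
        }

        Arrays.parallelSort(a, from, to);

        // Fences intact, payload sorted.
        System.out.println(a[0] == 0xDEDA && a[a.length - 1] == 0xBABA);
        System.out.println(Arrays.toString(a));
    }
}
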
-
-    private static void checkRange(Object object, int m) {
-        if (object instanceof int[]) {
-            checkRange((int[]) object, m);
-        } else if (object instanceof long[]) {
-            checkRange((long[]) object, m);
-        } else if (object instanceof short[]) {
-            checkRange((short[]) object, m);
-        } else if (object instanceof byte[]) {
-            checkRange((byte[]) object, m);
-        } else if (object instanceof char[]) {
-            checkRange((char[]) object, m);
-        } else if (object instanceof float[]) {
-            checkRange((float[]) object, m);
-        } else if (object instanceof double[]) {
-            checkRange((double[]) object, m);
-        } else if (object instanceof Integer[]) {
-            checkRange((Integer[]) object, m);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
-
-    private static void checkRange(Integer[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(int[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(long[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(byte[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(short[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(char[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(float[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(double[] a, int m) {
-        try {
-            Arrays.parallelSort(a, m + 1, m);
-
-            failed("ParallelSort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.parallelSort(a, -m, a.length);
-
-                failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.parallelSort(a, 0, a.length + m);
-
-                    failed("ParallelSort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
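
The checkRange overloads pin down the documented exception contract of the range sorts: fromIndex greater than toIndex must raise IllegalArgumentException, while an index outside the array must raise ArrayIndexOutOfBoundsException. A compact standalone probe of the same contract:

import java.util.Arrays;

public class RangeContractDemo {

    public static void main(String[] args) {
        int[] a = new int[8];

        try {
            Arrays.parallelSort(a, 5, 2);         // fromIndex > toIndex
            throw new AssertionError("expected IllegalArgumentException");
        } catch (IllegalArgumentException expected) {
            System.out.println("IAE as documented");
        }

        try {
            Arrays.parallelSort(a, -1, a.length); // fromIndex out of bounds
            throw new AssertionError("expected ArrayIndexOutOfBoundsException");
        } catch (ArrayIndexOutOfBoundsException expected) {
            System.out.println("AIOOBE as documented");
        }
    }
}
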
-
-    private static void outArray(Object[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static void outArray(int[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static void outArray(float[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static void outArray(double[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static class MyRandom extends Random {
-        MyRandom(long seed) {
-            super(seed);
-            mySeed = seed;
-        }
-
-        long getSeed() {
-            return mySeed;
-        }
-
-        private long mySeed;
-    }
-
-    private static String ourDescription;
-}
--- a/test/jdk/java/util/Arrays/Sorting.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/util/Arrays/Sorting.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,277 +23,329 @@
 
 /*
  * @test
- * @bug 6880672 6896573 6899694 6976036 7013585 7018258
- * @summary Exercise Arrays.sort
+ * @compile/module=java.base java/util/SortingHelper.java
+ * @bug 6880672 6896573 6899694 6976036 7013585 7018258 8003981 8226297
  * @build Sorting
  * @run main Sorting -shortrun
+ * @summary Exercise Arrays.sort, Arrays.parallelSort
  *
  * @author Vladimir Yaroslavskiy
  * @author Jon Bentley
  * @author Josh Bloch
  */
 
-import java.util.Arrays;
+import java.io.PrintStream;
+import java.util.Comparator;
 import java.util.Random;
-import java.io.PrintStream;
+import java.util.SortingHelper;
 
 public class Sorting {
+
     private static final PrintStream out = System.out;
     private static final PrintStream err = System.err;
 
     // Array lengths used in a long run (default)
     private static final int[] LONG_RUN_LENGTHS = {
-        1, 2, 3, 5, 8, 13, 21, 34, 55, 100, 1000, 10000, 100000, 1000000 };
+        1, 3, 8, 21, 55, 100, 1_000, 10_000, 100_000 };
 
     // Array lengths used in a short run
     private static final int[] SHORT_RUN_LENGTHS = {
-        1, 2, 3, 21, 55, 1000, 10000 };
+        1, 8, 55, 100, 10_000 };
 
     // Random initial values used in a long run (default)
-    private static final long[] LONG_RUN_RANDOMS = { 666, 0xC0FFEE, 999 };
+    private static final TestRandom[] LONG_RUN_RANDOMS = {
+        TestRandom.BABA, TestRandom.DEDA, TestRandom.C0FFEE };
 
     // Random initial values used in a short run
-    private static final long[] SHORT_RUN_RANDOMS = { 666 };
+    private static final TestRandom[] SHORT_RUN_RANDOMS = {
+        TestRandom.C0FFEE };
+
+    // Constants used in subarray sorting
+    private static final int A380 = 0xA380;
+    private static final int B747 = 0xB747;
+
+    private final SortingHelper sortingHelper;
+    private final TestRandom[] randoms;
+    private final int[] lengths;
+    private Object[] gold;
+    private Object[] test;
 
     public static void main(String[] args) {
+        long start = System.currentTimeMillis();
         boolean shortRun = args.length > 0 && args[0].equals("-shortrun");
-        long start = System.currentTimeMillis();
+
+        int[] lengths = shortRun ? SHORT_RUN_LENGTHS : LONG_RUN_LENGTHS;
+        TestRandom[] randoms = shortRun ? SHORT_RUN_RANDOMS : LONG_RUN_RANDOMS;
 
-        if (shortRun) {
-            testAndCheck(SHORT_RUN_LENGTHS, SHORT_RUN_RANDOMS);
-        } else {
-            testAndCheck(LONG_RUN_LENGTHS, LONG_RUN_RANDOMS);
-        }
+        new Sorting(SortingHelper.DUAL_PIVOT_QUICKSORT, randoms, lengths).testCore();
+        new Sorting(SortingHelper.PARALLEL_SORT, randoms, lengths).testCore();
+        new Sorting(SortingHelper.HEAP_SORT, randoms, lengths).testBasic();
+        new Sorting(SortingHelper.ARRAYS_SORT, randoms, lengths).testAll();
+        new Sorting(SortingHelper.ARRAYS_PARALLEL_SORT, randoms, lengths).testAll();
+
         long end = System.currentTimeMillis();
-
-        out.format("PASSED in %d sec.\n", Math.round((end - start) / 1E3));
+        out.format("PASSED in %d sec.\n", (end - start) / 1000);
     }
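
The rewritten driver instantiates one Sorting per sorter exposed by java.util.SortingHelper, a test-only helper compiled into java.base by the @compile/module tag in the test header above: the internal dual-pivot and parallel entry points get the core suite, heap sort gets the basic suite, and the public Arrays.sort and Arrays.parallelSort get everything. A standalone sketch of the same strategy-enum dispatch using only public API (the Helper enum below is an illustrative stand-in, not the real SortingHelper):

import java.util.Arrays;
import java.util.function.Consumer;

public class HelperDemo {

    // Illustrative stand-in for the test-only java.util.SortingHelper:
    // each constant wraps one sorting entry point behind a common sort() call.
    enum Helper {
        ARRAYS_SORT(Arrays::sort),
        ARRAYS_PARALLEL_SORT(Arrays::parallelSort);

        private final Consumer<int[]> sorter;

        Helper(Consumer<int[]> sorter) {
            this.sorter = sorter;
        }

        void sort(int[] a) {
            sorter.accept(a);
        }
    }

    public static void main(String[] args) {
        for (Helper helper : Helper.values()) {
            int[] a = { 3, 1, 2 };
            helper.sort(a);
            System.out.println(helper + ": " + Arrays.toString(a));
        }
    }
}
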
 
-    private static void testAndCheck(int[] lengths, long[] randoms) {
-        testEmptyAndNullIntArray();
-        testEmptyAndNullLongArray();
-        testEmptyAndNullShortArray();
-        testEmptyAndNullCharArray();
-        testEmptyAndNullByteArray();
-        testEmptyAndNullFloatArray();
-        testEmptyAndNullDoubleArray();
+    private Sorting(SortingHelper sortingHelper, TestRandom[] randoms, int[] lengths) {
+        this.sortingHelper = sortingHelper;
+        this.randoms = randoms;
+        this.lengths = lengths;
+    }
+
+    private void testBasic() {
+        testEmptyArray();
 
         for (int length : lengths) {
-            testMergeSort(length);
-            testAndCheckRange(length);
-            testAndCheckSubArray(length);
+            createData(length);
+            testBasic(length);
         }
-        for (long seed : randoms) {
-            for (int length : lengths) {
-                testAndCheckWithInsertionSort(length, new MyRandom(seed));
-                testAndCheckWithCheckSum(length, new MyRandom(seed));
-                testAndCheckWithScrambling(length, new MyRandom(seed));
-                testAndCheckFloat(length, new MyRandom(seed));
-                testAndCheckDouble(length, new MyRandom(seed));
-                testStable(length, new MyRandom(seed));
-            }
+    }
+
+    private void testBasic(int length) {
+        for (TestRandom random : randoms) {
+            testWithInsertionSort(length, random);
+            testWithCheckSum(length, random);
+            testWithScrambling(length, random);
         }
     }
 
-    private static void testEmptyAndNullIntArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.sort(new int[] {});
-        Arrays.sort(new int[] {}, 0, 0);
+    private void testCore() {
+        for (int length : lengths) {
+            createData(length);
+            testCore(length);
+        }
+    }
+
+    private void testCore(int length) {
+        testBasic(length);
 
-        try {
-            Arrays.sort((int[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.sort((int[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.sort(int[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
+        for (TestRandom random : randoms) {
+            testMergingSort(length, random);
+            testSubArray(length, random);
+            testNegativeZero(length, random);
+            testFloatingPointSorting(length, random);
         }
-        failed("Arrays.sort(int[]) shouldn't catch null array");
+    }
+
+    private void testAll() {
+        for (int length : lengths) {
+            createData(length);
+            testAll(length);
+        }
+    }
+
+    private void testAll(int length) {
+        testCore(length);
+
+        for (TestRandom random : randoms) {
+            testRange(length, random);
+            testStability(length, random);
+        }
     }
 
-    private static void testEmptyAndNullLongArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.sort(new long[] {});
-        Arrays.sort(new long[] {}, 0, 0);
+    private void testEmptyArray() {
+        testEmptyAndNullIntArray();
+        testEmptyAndNullLongArray();
+        testEmptyAndNullByteArray();
+        testEmptyAndNullCharArray();
+        testEmptyAndNullShortArray();
+        testEmptyAndNullFloatArray();
+        testEmptyAndNullDoubleArray();
+    }
+
+    private void testStability(int length, TestRandom random) {
+        printTestName("Test stability", random, length);
+
+        Pair[] a = build(length, random);
+        sortingHelper.sort(a);
+        checkSorted(a);
+        checkStable(a);
+
+        a = build(length, random);
+        sortingHelper.sort(a, pairComparator);
+        checkSorted(a);
+        checkStable(a);
+
+        out.println();
+    }
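
testStability sorts an array of Pair elements twice, once by natural order and once with pairComparator (both defined further down in the new file), and verifies that elements with equal keys keep their original relative order. Arrays.sort on object arrays guarantees exactly this stability, which a standalone sketch can demonstrate (the record and names below are illustrative; Java 16+ record used for brevity):

import java.util.Arrays;
import java.util.Comparator;

public class StabilityDemo {

    record Pair(int key, int index) { }

    public static void main(String[] args) {
        Pair[] a = {
            new Pair(2, 0), new Pair(1, 1), new Pair(2, 2), new Pair(1, 3)
        };
        // Arrays.sort(Object[]) is a stable merge sort: equal keys keep
        // their original left-to-right order, so the indices stay ascending
        // within each key group.
        Arrays.sort(a, Comparator.comparingInt(Pair::key));

        System.out.println(Arrays.toString(a));
        // [Pair[key=1, index=1], Pair[key=1, index=3],
        //  Pair[key=2, index=0], Pair[key=2, index=2]]
    }
}
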
+
+    private void testEmptyAndNullIntArray() {
+        sortingHelper.sort(new int[] {});
+        sortingHelper.sort(new int[] {}, 0, 0);
 
         try {
-            Arrays.sort((long[]) null);
+            sortingHelper.sort(null);
         } catch (NullPointerException expected) {
             try {
-                Arrays.sort((long[]) null, 0, 0);
-            } catch (NullPointerException expected2) {
-                return;
-            }
-            failed("Arrays.sort(long[],fromIndex,toIndex) shouldn't " +
-                "catch null array");
-        }
-        failed("Arrays.sort(long[]) shouldn't catch null array");
-    }
-
-    private static void testEmptyAndNullShortArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.sort(new short[] {});
-        Arrays.sort(new short[] {}, 0, 0);
-
-        try {
-            Arrays.sort((short[]) null);
-        } catch (NullPointerException expected) {
-            try {
-                Arrays.sort((short[]) null, 0, 0);
+                sortingHelper.sort(null, 0, 0);
             } catch (NullPointerException expected2) {
                 return;
             }
-            failed("Arrays.sort(short[],fromIndex,toIndex) shouldn't " +
+            fail(sortingHelper + "(int[],fromIndex,toIndex) shouldn't " +
                 "catch null array");
         }
-        failed("Arrays.sort(short[]) shouldn't catch null array");
+        fail(sortingHelper + "(int[]) shouldn't catch null array");
     }
 
-    private static void testEmptyAndNullCharArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.sort(new char[] {});
-        Arrays.sort(new char[] {}, 0, 0);
+    private void testEmptyAndNullLongArray() {
+        sortingHelper.sort(new long[] {});
+        sortingHelper.sort(new long[] {}, 0, 0);
 
         try {
-            Arrays.sort((char[]) null);
+            sortingHelper.sort(null);
         } catch (NullPointerException expected) {
             try {
-                Arrays.sort((char[]) null, 0, 0);
+                sortingHelper.sort(null, 0, 0);
             } catch (NullPointerException expected2) {
                 return;
             }
-            failed("Arrays.sort(char[],fromIndex,toIndex) shouldn't " +
+            fail(sortingHelper + "(long[],fromIndex,toIndex) shouldn't " +
                 "catch null array");
         }
-        failed("Arrays.sort(char[]) shouldn't catch null array");
+        fail(sortingHelper + "(long[]) shouldn't catch null array");
     }
 
-    private static void testEmptyAndNullByteArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.sort(new byte[] {});
-        Arrays.sort(new byte[] {}, 0, 0);
+    private void testEmptyAndNullByteArray() {
+        sortingHelper.sort(new byte[] {});
+        sortingHelper.sort(new byte[] {}, 0, 0);
 
         try {
-            Arrays.sort((byte[]) null);
+            sortingHelper.sort(null);
         } catch (NullPointerException expected) {
             try {
-                Arrays.sort((byte[]) null, 0, 0);
+                sortingHelper.sort(null, 0, 0);
             } catch (NullPointerException expected2) {
                 return;
             }
-            failed("Arrays.sort(byte[],fromIndex,toIndex) shouldn't " +
+            fail(sortingHelper + "(byte[],fromIndex,toIndex) shouldn't " +
                 "catch null array");
         }
-        failed("Arrays.sort(byte[]) shouldn't catch null array");
+        fail(sortingHelper + "(byte[]) shouldn't catch null array");
     }
 
-    private static void testEmptyAndNullFloatArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.sort(new float[] {});
-        Arrays.sort(new float[] {}, 0, 0);
+    private void testEmptyAndNullCharArray() {
+        sortingHelper.sort(new char[] {});
+        sortingHelper.sort(new char[] {}, 0, 0);
 
         try {
-            Arrays.sort((float[]) null);
+            sortingHelper.sort(null);
         } catch (NullPointerException expected) {
             try {
-                Arrays.sort((float[]) null, 0, 0);
+                sortingHelper.sort(null, 0, 0);
             } catch (NullPointerException expected2) {
                 return;
             }
-            failed("Arrays.sort(float[],fromIndex,toIndex) shouldn't " +
+            fail(sortingHelper + "(char[],fromIndex,toIndex) shouldn't " +
                 "catch null array");
         }
-        failed("Arrays.sort(float[]) shouldn't catch null array");
+        fail(sortingHelper + "(char[]) shouldn't catch null array");
     }
 
-    private static void testEmptyAndNullDoubleArray() {
-        ourDescription = "Check empty and null array";
-        Arrays.sort(new double[] {});
-        Arrays.sort(new double[] {}, 0, 0);
+    private void testEmptyAndNullShortArray() {
+        sortingHelper.sort(new short[] {});
+        sortingHelper.sort(new short[] {}, 0, 0);
 
         try {
-            Arrays.sort((double[]) null);
+            sortingHelper.sort(null);
         } catch (NullPointerException expected) {
             try {
-                Arrays.sort((double[]) null, 0, 0);
+                sortingHelper.sort(null, 0, 0);
+            } catch (NullPointerException expected2) {
+                return;
+            }
+            fail(sortingHelper + "(short[],fromIndex,toIndex) shouldn't " +
+                "catch null array");
+        }
+        fail(sortingHelper + "(short[]) shouldn't catch null array");
+    }
+
+    private void testEmptyAndNullFloatArray() {
+        sortingHelper.sort(new float[] {});
+        sortingHelper.sort(new float[] {}, 0, 0);
+
+        try {
+            sortingHelper.sort(null);
+        } catch (NullPointerException expected) {
+            try {
+                sortingHelper.sort(null, 0, 0);
             } catch (NullPointerException expected2) {
                 return;
             }
-            failed("Arrays.sort(double[],fromIndex,toIndex) shouldn't " +
+            fail(sortingHelper + "(float[],fromIndex,toIndex) shouldn't " +
                 "catch null array");
         }
-        failed("Arrays.sort(double[]) shouldn't catch null array");
+        fail(sortingHelper + "(float[]) shouldn't catch null array");
     }
 
-    private static void testAndCheckSubArray(int length) {
-        ourDescription = "Check sorting of subarray";
-        int[] golden = new int[length];
-        boolean newLine = false;
+    private void testEmptyAndNullDoubleArray() {
+        sortingHelper.sort(new double[] {});
+        sortingHelper.sort(new double[] {}, 0, 0);
 
-        for (int m = 1; m < length / 2; m *= 2) {
-            newLine = true;
+        try {
+            sortingHelper.sort(null);
+        } catch (NullPointerException expected) {
+            try {
+                sortingHelper.sort(null, 0, 0);
+            } catch (NullPointerException expected2) {
+                return;
+            }
+            fail(sortingHelper + "(double[],fromIndex,toIndex) shouldn't " +
+                "catch null array");
+        }
+        fail(sortingHelper + "(double[]) shouldn't catch null array");
+    }
+
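+    // Subarray sort: only [fromIndex, toIndex) may be reordered; the flanks
+    // hold the sentinels A380 and B747 and are verified afterwards.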
+    private void testSubArray(int length, TestRandom random) {
+        if (length < 4) {
+            return;
+        }
+        for (int m = 1; m < length / 2; m <<= 1) {
             int fromIndex = m;
             int toIndex = length - m;
 
-            prepareSubArray(golden, fromIndex, toIndex, m);
-            int[] test = golden.clone();
+            prepareSubArray((int[]) gold[0], fromIndex, toIndex);
+            convertData(length);
 
-            for (TypeConverter converter : TypeConverter.values()) {
-                out.println("Test 'subarray': " + converter +
-                   " length = " + length + ", m = " + m);
-                Object convertedGolden = converter.convert(golden);
-                Object convertedTest = converter.convert(test);
-                sortSubArray(convertedTest, fromIndex, toIndex);
-                checkSubArray(convertedTest, fromIndex, toIndex, m);
-            }
-        }
-        if (newLine) {
-            out.println();
-        }
-    }
-
-    private static void testAndCheckRange(int length) {
-        ourDescription = "Check range check";
-        int[] golden = new int[length];
-
-        for (int m = 1; m < 2 * length; m *= 2) {
-            for (int i = 1; i <= length; i++) {
-                golden[i - 1] = i % m + m % i;
-            }
-            for (TypeConverter converter : TypeConverter.values()) {
-                out.println("Test 'range': " + converter +
-                   ", length = " + length + ", m = " + m);
-                Object convertedGolden = converter.convert(golden);
-                checkRange(convertedGolden, m);
+            for (int i = 0; i < test.length; i++) {
+                printTestName("Test subarray", random, length,
+                    ", m = " + m + ", " + getType(i));
+                sortingHelper.sort(test[i], fromIndex, toIndex);
+                checkSubArray(test[i], fromIndex, toIndex);
             }
         }
         out.println();
     }
 
-    private static void testStable(int length, MyRandom random) {
-        ourDescription = "Check if sorting is stable";
-        Pair[] a = build(length, random);
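+    // Illegal ranges: fromIndex > toIndex must raise IllegalArgumentException,
+    // out-of-bounds indices ArrayIndexOutOfBoundsException (see checkRange).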
+    private void testRange(int length, TestRandom random) {
+        if (length < 2) {
+            return;
+        }
+        for (int m = 1; m < length; m <<= 1) {
+            for (int i = 1; i <= length; i++) {
+                ((int[]) gold[0])[i - 1] = i % m + m % i;
+            }
+            convertData(length);
 
-        out.println("Test 'stable': " + "random = " + random.getSeed() +
-            ", length = " + length);
-        Arrays.sort(a);
-        checkSorted(a);
-        checkStable(a);
+            for (int i = 0; i < test.length; i++) {
+                printTestName("Test range check", random, length,
+                    ", m = " + m + ", " + getType(i));
+                checkRange(test[i], m);
+            }
+        }
         out.println();
     }
 
-    private static void checkSorted(Pair[] a) {
+    private void checkSorted(Pair[] a) {
         for (int i = 0; i < a.length - 1; i++) {
             if (a[i].getKey() > a[i + 1].getKey()) {
-                failedSort(i, "" + a[i].getKey(), "" + a[i + 1].getKey());
+                fail("Array is not sorted at " + i + "-th position: " +
+                    a[i].getKey() + " and " + a[i + 1].getKey());
             }
         }
     }
 
-    private static void checkStable(Pair[] a) {
+    private void checkStable(Pair[] a) {
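+        // Equal keys come in groups of four with increasing values (see
+        // build()), so any reordering within a group breaks the value chain.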
         for (int i = 0; i < a.length / 4; ) {
             int key1 = a[i].getKey();
             int value1 = a[i++].getValue();
@@ -305,18 +357,18 @@
             int value4 = a[i++].getValue();
 
             if (!(key1 == key2 && key2 == key3 && key3 == key4)) {
-                failed("On position " + i + " keys are different " +
-                    key1 + ", " + key2 + ", " + key3 + ", " + key4);
+                fail("Keys are different " + key1 + ", " + key2 + ", " +
+                    key3 + ", " + key4 + " at position " + i);
             }
             if (!(value1 < value2 && value2 < value3 && value3 < value4)) {
-                failed("Sorting is not stable at position " + i +
-                    ". Second values have been changed: " +  value1 + ", " +
+                fail("Sorting is not stable at position " + i +
+                    ". Second values have been changed: " + value1 + ", " +
                     value2 + ", " + value3 + ", " + value4);
             }
         }
     }
 
-    private static Pair[] build(int length, Random random) {
+    private Pair[] build(int length, Random random) {
         Pair[] a = new Pair[length * 4];
 
         for (int i = 0; i < a.length; ) {
@@ -329,222 +381,151 @@
         return a;
     }
 
-    private static final class Pair implements Comparable<Pair> {
-        Pair(int key, int value) {
-            myKey = key;
-            myValue = value;
-        }
-
-        int getKey() {
-            return myKey;
-        }
-
-        int getValue() {
-            return myValue;
-        }
-
-        public int compareTo(Pair pair) {
-            if (myKey < pair.myKey) {
-                return -1;
-            }
-            if (myKey > pair.myKey) {
-                return 1;
-            }
-            return 0;
-        }
-
-        @Override
-        public String toString() {
-            return "(" + myKey + ", " + myValue + ")";
-        }
-
-        private int myKey;
-        private int myValue;
-    }
-
-
-    private static void testAndCheckWithInsertionSort(int length, MyRandom random) {
+    private void testWithInsertionSort(int length, TestRandom random) {
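+        // Insertion sort is the reference oracle, so keep lengths small.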
         if (length > 1000) {
             return;
         }
-        ourDescription = "Check sorting with insertion sort";
-        int[] golden = new int[length];
-
-        for (int m = 1; m < 2 * length; m *= 2) {
+        for (int m = 1; m <= length; m <<= 1) {
             for (UnsortedBuilder builder : UnsortedBuilder.values()) {
-                builder.build(golden, m, random);
-                int[] test = golden.clone();
+                builder.build((int[]) gold[0], m, random);
+                convertData(length);
 
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'insertion sort': " + converter +
-                        " " + builder + "random = " + random.getSeed() +
-                        ", length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    Object convertedTest1 = converter.convert(test);
-                    Object convertedTest2 = converter.convert(test);
-                    sort(convertedTest1);
-                    sortByInsertionSort(convertedTest2);
-                    compare(convertedTest1, convertedTest2);
+                for (int i = 0; i < test.length; i++) {
+                    printTestName("Test with insertion sort", random, length,
+                        ", m = " + m + ", " + getType(i) + " " + builder);
+                    sortingHelper.sort(test[i]);
+                    sortByInsertionSort(gold[i]);
+                    compare(test[i], gold[i]);
                 }
             }
         }
         out.println();
     }
 
-    private static void testMergeSort(int length) {
-        if (length < 1000) {
+    private void testMergingSort(int length, TestRandom random) {
+        if (length < (4 << 10)) { // DualPivotQuicksort.MIN_TRY_MERGE_SIZE
             return;
         }
-        ourDescription = "Check merge sorting";
-        int[] golden = new int[length];
-        int period = 67; // java.util.DualPivotQuicksort.MAX_RUN_COUNT
+        final int PERIOD = 50;
+
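+        // Sweep the period m around PERIOD to vary the run lengths that the
+        // merging code has to detect.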
+        for (int m = PERIOD - 2; m <= PERIOD + 2; m++) {
+            for (MergingBuilder builder : MergingBuilder.values()) {
+                builder.build((int[]) gold[0], m);
+                convertData(length);
 
-        for (int m = period - 2; m <= period + 2; m++) {
-            for (MergeBuilder builder : MergeBuilder.values()) {
-                builder.build(golden, m);
-                int[] test = golden.clone();
+                for (int i = 0; i < test.length; i++) {
+                    printTestName("Test merging sort", random, length,
+                        ", m = " + m + ", " +  getType(i) + " " + builder);
+                    sortingHelper.sort(test[i]);
+                    checkSorted(test[i]);
+                }
+            }
+        }
+        out.println();
+    }
 
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'merge sort': " + converter + " " +
-                        builder + "length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    sort(convertedGolden);
-                    checkSorted(convertedGolden);
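+    // Permutation check: after sorting, the xor and additive checksums must
+    // match those of the unsorted gold copy.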
+    private void testWithCheckSum(int length, TestRandom random) {
+        for (int m = 1; m <= length; m <<= 1) {
+            for (UnsortedBuilder builder : UnsortedBuilder.values()) {
+                builder.build((int[]) gold[0], m, random);
+                convertData(length);
+
+                for (int i = 0; i < test.length; i++) {
+                    printTestName("Test with check sum", random, length,
+                        ", m = " + m + ", " + getType(i) + " " + builder);
+                    sortingHelper.sort(test[i]);
+                    checkWithCheckSum(test[i], gold[i]);
                 }
             }
         }
         out.println();
     }
 
-    private static void testAndCheckWithCheckSum(int length, MyRandom random) {
-        ourDescription = "Check sorting with check sum";
-        int[] golden = new int[length];
-
-        for (int m = 1; m < 2 * length; m *= 2) {
-            for (UnsortedBuilder builder : UnsortedBuilder.values()) {
-                builder.build(golden, m, random);
-                int[] test = golden.clone();
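+    // Scramble an already sorted pattern, sort it back, and compare it
+    // elementwise with the untouched gold copy.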
+    private void testWithScrambling(int length, TestRandom random) {
+        for (int m = 1; m <= length; m <<= 1) {
+            for (SortedBuilder builder : SortedBuilder.values()) {
+                builder.build((int[]) gold[0], m);
+                convertData(length);
 
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'check sum': " + converter +
-                        " " + builder + "random = " + random.getSeed() +
-                        ", length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    Object convertedTest = converter.convert(test);
-                    sort(convertedTest);
-                    checkWithCheckSum(convertedTest, convertedGolden);
-                }
-            }
-        }
-        out.println();
-    }
-
-    private static void testAndCheckWithScrambling(int length, MyRandom random) {
-        ourDescription = "Check sorting with scrambling";
-        int[] golden = new int[length];
-
-        for (int m = 1; m <= 7; m++) {
-            if (m > length) {
-                break;
-            }
-            for (SortedBuilder builder : SortedBuilder.values()) {
-                builder.build(golden, m);
-                int[] test = golden.clone();
-                scramble(test, random);
-
-                for (TypeConverter converter : TypeConverter.values()) {
-                    out.println("Test 'scrambling': " + converter +
-                       " " + builder + "random = " + random.getSeed() +
-                       ", length = " + length + ", m = " + m);
-                    Object convertedGolden = converter.convert(golden);
-                    Object convertedTest = converter.convert(test);
-                    sort(convertedTest);
-                    compare(convertedTest, convertedGolden);
+                for (int i = 0; i < test.length; i++) {
+                    printTestName("Test with scrambling", random, length,
+                        ", m = " + m + ", " + getType(i) + " " + builder);
+                    scramble(test[i], random);
+                    sortingHelper.sort(test[i]);
+                    compare(test[i], gold[i]);
                 }
             }
         }
         out.println();
     }
 
-    private static void testAndCheckFloat(int length, MyRandom random) {
-        ourDescription = "Check float sorting";
-        float[] golden = new float[length];
-        final int MAX = 10;
-        boolean newLine = false;
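+    // Only the float and double arrays (indices 5 and 6) can hold -0.0;
+    // after sorting, no -0.0 may appear behind a +0.0.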
+    private void testNegativeZero(int length, TestRandom random) {
+        for (int i = 5; i < test.length; i++) {
+            printTestName("Test negative zero -0.0", random, length, " " + getType(i));
+
+            NegativeZeroBuilder builder = NegativeZeroBuilder.values()[i - 5];
+            builder.build(test[i], random);
+
+            sortingHelper.sort(test[i]);
+            checkNegativeZero(test[i]);
+        }
+        out.println();
+    }
 
-        for (int a = 0; a <= MAX; a++) {
-            for (int g = 0; g <= MAX; g++) {
-                for (int z = 0; z <= MAX; z++) {
-                    for (int n = 0; n <= MAX; n++) {
-                        for (int p = 0; p <= MAX; p++) {
-                            if (a + g + z + n + p > length) {
+    private void testFloatingPointSorting(int length, TestRandom random) {
+        if (length < 2) {
+            return;
+        }
+        final int MAX = 13;
+
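+        // a, g, z, n, p count NaNs, negative zeros, positive zeros, negative
+        // and positive values; every composition of length is exercised.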
+        for (int a = 0; a < MAX; a++) {
+            for (int g = 0; g < MAX; g++) {
+                for (int z = 0; z < MAX; z++) {
+                    for (int n = 0; n < MAX; n++) {
+                        for (int p = 0; p < MAX; p++) {
+                            if (a + g + z + n + p != length) {
                                 continue;
                             }
-                            if (a + g + z + n + p < length) {
-                                continue;
+                            for (int i = 5; i < test.length; i++) {
+                                printTestName("Test float-pointing sorting", random, length,
+                                    ", a = " + a + ", g = " + g + ", z = " + z +
+                                    ", n = " + n + ", p = " + p + ", " + getType(i));
+                                FloatingPointBuilder builder = FloatingPointBuilder.values()[i - 5];
+                                builder.build(gold[i], a, g, z, n, p, random);
+                                copy(test[i], gold[i]);
+                                scramble(test[i], random);
+                                sortingHelper.sort(test[i]);
+                                compare(test[i], gold[i], a, n, g);
                             }
-                            for (FloatBuilder builder : FloatBuilder.values()) {
-                                out.println("Test 'float': random = " + random.getSeed() +
-                                   ", length = " + length + ", a = " + a + ", g = " +
-                                   g + ", z = " + z + ", n = " + n + ", p = " + p);
-                                builder.build(golden, a, g, z, n, p, random);
-                                float[] test = golden.clone();
-                                scramble(test, random);
-                                sort(test);
-                                compare(test, golden, a, n, g);
-                            }
-                            newLine = true;
                         }
                     }
                 }
             }
         }
-        if (newLine) {
-            out.println();
+
+        for (int m = 13; m > 4; m--) {
+            int t = length / m;
+            int g = t, z = t, n = t, p = t;
+            int a = length - g - z - n - p;
+
+            for (int i = 5; i < test.length; i++) {
+                printTestName("Test float-pointing sorting", random, length,
+                    ", a = " + a + ", g = " + g + ", z = " + z +
+                    ", n = " + n + ", p = " + p + ", " + getType(i));
+                FloatingPointBuilder builder = FloatingPointBuilder.values() [i - 5];
+                builder.build(gold[i], a, g, z, n, p, random);
+                copy(test[i], gold[i]);
+                scramble(test[i], random);
+                sortingHelper.sort(test[i]);
+                compare(test[i], gold[i], a, n, g);
+            }
         }
+        out.println();
     }
 
-    private static void testAndCheckDouble(int length, MyRandom random) {
-        ourDescription = "Check double sorting";
-        double[] golden = new double[length];
-        final int MAX = 10;
-        boolean newLine = false;
-
-        for (int a = 0; a <= MAX; a++) {
-            for (int g = 0; g <= MAX; g++) {
-                for (int z = 0; z <= MAX; z++) {
-                    for (int n = 0; n <= MAX; n++) {
-                        for (int p = 0; p <= MAX; p++) {
-                            if (a + g + z + n + p > length) {
-                                continue;
-                            }
-                            if (a + g + z + n + p < length) {
-                                continue;
-                            }
-                            for (DoubleBuilder builder : DoubleBuilder.values()) {
-                                out.println("Test 'double': random = " + random.getSeed() +
-                                   ", length = " + length + ", a = " + a + ", g = " +
-                                   g + ", z = " + z + ", n = " + n + ", p = " + p);
-                                builder.build(golden, a, g, z, n, p, random);
-                                double[] test = golden.clone();
-                                scramble(test, random);
-                                sort(test);
-                                compare(test, golden, a, n, g);
-                            }
-                            newLine = true;
-                        }
-                    }
-                }
-            }
-        }
-        if (newLine) {
-            out.println();
-        }
-    }
-
-    private static void prepareSubArray(int[] a, int fromIndex, int toIndex, int m) {
+    private void prepareSubArray(int[] a, int fromIndex, int toIndex) {
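+        // Guard both flanks with the sentinels A380 and B747; the middle
+        // ramps up and then down so the range sort has real work to do.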
         for (int i = 0; i < fromIndex; i++) {
-            a[i] = 0xDEDA;
+            a[i] = A380;
         }
         int middle = (fromIndex + toIndex) >>> 1;
         int k = 0;
@@ -552,338 +533,1112 @@
         for (int i = fromIndex; i < middle; i++) {
             a[i] = k++;
         }
+
         for (int i = middle; i < toIndex; i++) {
             a[i] = k--;
         }
+
         for (int i = toIndex; i < a.length; i++) {
-            a[i] = 0xBABA;
+            a[i] = B747;
         }
     }
 
-    private static void scramble(int[] a, Random random) {
+    private void scramble(Object a, Random random) {
+        if (a instanceof int[]) {
+            scramble((int[]) a, random);
+        } else if (a instanceof long[]) {
+            scramble((long[]) a, random);
+        } else if (a instanceof byte[]) {
+            scramble((byte[]) a, random);
+        } else if (a instanceof char[]) {
+            scramble((char[]) a, random);
+        } else if (a instanceof short[]) {
+            scramble((short[]) a, random);
+        } else if (a instanceof float[]) {
+            scramble((float[]) a, random);
+        } else if (a instanceof double[]) {
+            scramble((double[]) a, random);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
+        }
+    }
+
+    private void scramble(int[] a, Random random) {
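+        // 7 * length random transpositions shuffle the array thoroughly.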
         for (int i = 0; i < a.length * 7; i++) {
             swap(a, random.nextInt(a.length), random.nextInt(a.length));
         }
     }
 
-    private static void scramble(float[] a, Random random) {
+    private void scramble(long[] a, Random random) {
+        for (int i = 0; i < a.length * 7; i++) {
+            swap(a, random.nextInt(a.length), random.nextInt(a.length));
+        }
+    }
+
+    private void scramble(byte[] a, Random random) {
         for (int i = 0; i < a.length * 7; i++) {
             swap(a, random.nextInt(a.length), random.nextInt(a.length));
         }
     }
 
-    private static void scramble(double[] a, Random random) {
+    private void scramble(char[] a, Random random) {
+        for (int i = 0; i < a.length * 7; i++) {
+            swap(a, random.nextInt(a.length), random.nextInt(a.length));
+        }
+    }
+
+    private void scramble(short[] a, Random random) {
+        for (int i = 0; i < a.length * 7; i++) {
+            swap(a, random.nextInt(a.length), random.nextInt(a.length));
+        }
+    }
+
+    private void scramble(float[] a, Random random) {
+        for (int i = 0; i < a.length * 7; i++) {
+            swap(a, random.nextInt(a.length), random.nextInt(a.length));
+        }
+    }
+
+    private void scramble(double[] a, Random random) {
         for (int i = 0; i < a.length * 7; i++) {
             swap(a, random.nextInt(a.length), random.nextInt(a.length));
         }
     }
 
-    private static void swap(int[] a, int i, int j) {
-        int t = a[i];
-        a[i] = a[j];
-        a[j] = t;
+    private void swap(int[] a, int i, int j) {
+        int t = a[i]; a[i] = a[j]; a[j] = t;
+    }
+
+    private void swap(long[] a, int i, int j) {
+        long t = a[i]; a[i] = a[j]; a[j] = t;
     }
 
-    private static void swap(float[] a, int i, int j) {
-        float t = a[i];
-        a[i] = a[j];
-        a[j] = t;
+    private void swap(byte[] a, int i, int j) {
+        byte t = a[i]; a[i] = a[j]; a[j] = t;
     }
 
-    private static void swap(double[] a, int i, int j) {
-        double t = a[i];
-        a[i] = a[j];
-        a[j] = t;
+    private void swap(char[] a, int i, int j) {
+        char t = a[i]; a[i] = a[j]; a[j] = t;
+    }
+
+    private void swap(short[] a, int i, int j) {
+        short t = a[i]; a[i] = a[j]; a[j] = t;
     }
 
-    private static enum TypeConverter {
-        INT {
-            Object convert(int[] a) {
-                return a.clone();
-            }
-        },
-        LONG {
-            Object convert(int[] a) {
-                long[] b = new long[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (long) a[i];
-                }
-                return b;
-            }
-        },
-        BYTE {
-            Object convert(int[] a) {
-                byte[] b = new byte[a.length];
+    private void swap(float[] a, int i, int j) {
+        float t = a[i]; a[i] = a[j]; a[j] = t;
+    }
 
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (byte) a[i];
-                }
-                return b;
-            }
-        },
-        SHORT {
-            Object convert(int[] a) {
-                short[] b = new short[a.length];
+    private void swap(double[] a, int i, int j) {
+        double t = a[i]; a[i] = a[j]; a[j] = t;
+    }
 
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (short) a[i];
-                }
-                return b;
-            }
-        },
-        CHAR {
-            Object convert(int[] a) {
-                char[] b = new char[a.length];
+    private void checkWithCheckSum(Object test, Object gold) {
+        checkSorted(test);
+        checkCheckSum(test, gold);
+    }
 
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (char) a[i];
-                }
-                return b;
-            }
-        },
-        FLOAT {
-            Object convert(int[] a) {
-                float[] b = new float[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (float) a[i];
-                }
-                return b;
-            }
-        },
-        DOUBLE {
-            Object convert(int[] a) {
-                double[] b = new double[a.length];
+    private void fail(String message) {
+        err.format("\n*** TEST FAILED ***\n\n%s\n\n", message);
+        throw new RuntimeException("Test failed");
+    }
 
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = (double) a[i];
-                }
-                return b;
-            }
-        },
-        INTEGER {
-            Object convert(int[] a) {
-                Integer[] b = new Integer[a.length];
-
-                for (int i = 0; i < a.length; i++) {
-                    b[i] = new Integer(a[i]);
-                }
-                return b;
-            }
-        };
-
-        abstract Object convert(int[] a);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 9; i++) {
-                name += " ";
-            }
-            return name;
+    private void checkNegativeZero(Object a) {
+        if (a instanceof float[]) {
+            checkNegativeZero((float[]) a);
+        } else if (a instanceof double[]) {
+            checkNegativeZero((double[]) a);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
         }
     }
 
-    private static enum FloatBuilder {
-        SIMPLE {
-            void build(float[] x, int a, int g, int z, int n, int p, Random random) {
-                int fromIndex = 0;
-                float negativeValue = -random.nextFloat();
-                float positiveValue =  random.nextFloat();
-
-                writeValue(x, negativeValue, fromIndex, n);
-                fromIndex += n;
-
-                writeValue(x, -0.0f, fromIndex, g);
-                fromIndex += g;
-
-                writeValue(x, 0.0f, fromIndex, z);
-                fromIndex += z;
-
-                writeValue(x, positiveValue, fromIndex, p);
-                fromIndex += p;
-
-                writeValue(x, Float.NaN, fromIndex, a);
+    private void checkNegativeZero(float[] a) {
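+        // +0.0f has raw bits 0; -0.0f has only the sign bit set, which is
+        // negative as an int, so +0.0 directly followed by -0.0 is an error.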
+        for (int i = 0; i < a.length - 1; i++) {
+            if (Float.floatToRawIntBits(a[i]) == 0 && Float.floatToRawIntBits(a[i + 1]) < 0) {
+                fail(a[i] + " before " + a[i + 1] + " at position " + i);
             }
-        };
-
-        abstract void build(float[] x, int a, int g, int z, int n, int p, Random random);
-    }
-
-    private static enum DoubleBuilder {
-        SIMPLE {
-            void build(double[] x, int a, int g, int z, int n, int p, Random random) {
-                int fromIndex = 0;
-                double negativeValue = -random.nextFloat();
-                double positiveValue =  random.nextFloat();
-
-                writeValue(x, negativeValue, fromIndex, n);
-                fromIndex += n;
-
-                writeValue(x, -0.0d, fromIndex, g);
-                fromIndex += g;
-
-                writeValue(x, 0.0d, fromIndex, z);
-                fromIndex += z;
-
-                writeValue(x, positiveValue, fromIndex, p);
-                fromIndex += p;
-
-                writeValue(x, Double.NaN, fromIndex, a);
-            }
-        };
-
-        abstract void build(double[] x, int a, int g, int z, int n, int p, Random random);
-    }
-
-    private static void writeValue(float[] a, float value, int fromIndex, int count) {
-        for (int i = fromIndex; i < fromIndex + count; i++) {
-            a[i] = value;
         }
     }
 
-    private static void compare(float[] a, float[] b, int numNaN, int numNeg, int numNegZero) {
+    private void checkNegativeZero(double[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (Double.doubleToRawLongBits(a[i]) == 0 && Double.doubleToRawLongBits(a[i + 1]) < 0) {
+                fail(a[i] + " before " + a[i + 1] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(Object a, Object b, int numNaN, int numNeg, int numNegZero) {
+        if (a instanceof float[]) {
+            compare((float[]) a, (float[]) b, numNaN, numNeg, numNegZero);
+        } else if (a instanceof double[]) {
+            compare((double[]) a, (double[]) b, numNaN, numNeg, numNegZero);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
+        }
+    }
+
+    private void compare(float[] a, float[] b, int numNaN, int numNeg, int numNegZero) {
         for (int i = a.length - numNaN; i < a.length; i++) {
             if (a[i] == a[i]) {
-                failed("On position " + i + " must be NaN instead of " + a[i]);
+                fail("There must be NaN instead of " + a[i] + " at position " + i);
             }
         }
         final int NEGATIVE_ZERO = Float.floatToIntBits(-0.0f);
 
         for (int i = numNeg; i < numNeg + numNegZero; i++) {
             if (NEGATIVE_ZERO != Float.floatToIntBits(a[i])) {
-                failed("On position " + i + " must be -0.0 instead of " + a[i]);
+                fail("There must be -0.0 instead of " + a[i] + " at position " + i);
             }
         }
+
         for (int i = 0; i < a.length - numNaN; i++) {
             if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
             }
         }
     }
 
-    private static void writeValue(double[] a, double value, int fromIndex, int count) {
-        for (int i = fromIndex; i < fromIndex + count; i++) {
-            a[i] = value;
-        }
-    }
-
-    private static void compare(double[] a, double[] b, int numNaN, int numNeg, int numNegZero) {
+    private void compare(double[] a, double[] b, int numNaN, int numNeg, int numNegZero) {
         for (int i = a.length - numNaN; i < a.length; i++) {
             if (a[i] == a[i]) {
-                failed("On position " + i + " must be NaN instead of " + a[i]);
+                fail("There must be NaN instead of " + a[i] + " at position " + i);
             }
         }
         final long NEGATIVE_ZERO = Double.doubleToLongBits(-0.0d);
 
         for (int i = numNeg; i < numNeg + numNegZero; i++) {
             if (NEGATIVE_ZERO != Double.doubleToLongBits(a[i])) {
-                failed("On position " + i + " must be -0.0 instead of " + a[i]);
+                fail("There must be -0.0 instead of " + a[i] + " at position " + i);
+            }
+        }
+
+        for (int i = 0; i < a.length - numNaN; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(Object a, Object b) {
+        if (a instanceof int[]) {
+            compare((int[]) a, (int[]) b);
+        } else if (a instanceof long[]) {
+            compare((long[]) a, (long[]) b);
+        } else if (a instanceof byte[]) {
+            compare((byte[]) a, (byte[]) b);
+        } else if (a instanceof char[]) {
+            compare((char[]) a, (char[]) b);
+        } else if (a instanceof short[]) {
+            compare((short[]) a, (short[]) b);
+        } else if (a instanceof float[]) {
+            compare((float[]) a, (float[]) b);
+        } else if (a instanceof double[]) {
+            compare((double[]) a, (double[]) b);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
+        }
+    }
+
+    private void compare(int[] a, int[] b) {
+        for (int i = 0; i < a.length; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(long[] a, long[] b) {
+        for (int i = 0; i < a.length; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(byte[] a, byte[] b) {
+        for (int i = 0; i < a.length; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(char[] a, char[] b) {
+        for (int i = 0; i < a.length; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(short[] a, short[] b) {
+        for (int i = 0; i < a.length; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(float[] a, float[] b) {
+        for (int i = 0; i < a.length; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
+            }
+        }
+    }
+
+    private void compare(double[] a, double[] b) {
+        for (int i = 0; i < a.length; i++) {
+            if (a[i] != b[i]) {
+                fail("There must be " + b[i] + " instead of " + a[i] + " at position " + i);
             }
         }
-        for (int i = 0; i < a.length - numNaN; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
+    }
+
+    private String getType(int i) {
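+        // Type names are padded to six characters to align the log output.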
+        Object a = test[i];
+
+        if (a instanceof int[]) {
+            return "INT   ";
+        }
+        if (a instanceof long[]) {
+            return "LONG  ";
+        }
+        if (a instanceof byte[]) {
+            return "BYTE  ";
+        }
+        if (a instanceof char[]) {
+            return "CHAR  ";
+        }
+        if (a instanceof short[]) {
+            return "SHORT ";
+        }
+        if (a instanceof float[]) {
+            return "FLOAT ";
+        }
+        if (a instanceof double[]) {
+            return "DOUBLE";
+        }
+        fail("Unknown type of array: " + a.getClass().getName());
+        return null;
+    }
+
+    private void checkSorted(Object a) {
+        if (a instanceof int[]) {
+            checkSorted((int[]) a);
+        } else if (a instanceof long[]) {
+            checkSorted((long[]) a);
+        } else if (a instanceof byte[]) {
+            checkSorted((byte[]) a);
+        } else if (a instanceof char[]) {
+            checkSorted((char[]) a);
+        } else if (a instanceof short[]) {
+            checkSorted((short[]) a);
+        } else if (a instanceof float[]) {
+            checkSorted((float[]) a);
+        } else if (a instanceof double[]) {
+            checkSorted((double[]) a);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
+        }
+    }
+
+    private void checkSorted(int[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+    }
+
+    private void checkSorted(long[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+    }
+
+    private void checkSorted(byte[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+    }
+
+    private void checkSorted(char[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+    }
+
+    private void checkSorted(short[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+    }
+
+    private void checkSorted(float[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+    }
+
+    private void checkSorted(double[] a) {
+        for (int i = 0; i < a.length - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
             }
         }
     }
 
-    private static enum SortedBuilder {
-        REPEATED {
-            void build(int[] a, int m) {
-                int period = a.length / m;
-                int i = 0;
-                int k = 0;
+    private void checkCheckSum(Object test, Object gold) {
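+        // Two independent checksums make an accidental match of a corrupted
+        // array with the gold copy very unlikely.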
+        if (checkSumXor(test) != checkSumXor(gold)) {
+            fail("Original and sorted arrays are not identical [^]");
+        }
+        if (checkSumPlus(test) != checkSumPlus(gold)) {
+            fail("Original and sorted arrays are not identical [+]");
+        }
+    }
+
+    private int checkSumXor(Object a) {
+        if (a instanceof int[]) {
+            return checkSumXor((int[]) a);
+        }
+        if (a instanceof long[]) {
+            return checkSumXor((long[]) a);
+        }
+        if (a instanceof byte[]) {
+            return checkSumXor((byte[]) a);
+        }
+        if (a instanceof char[]) {
+            return checkSumXor((char[]) a);
+        }
+        if (a instanceof short[]) {
+            return checkSumXor((short[]) a);
+        }
+        if (a instanceof float[]) {
+            return checkSumXor((float[]) a);
+        }
+        if (a instanceof double[]) {
+            return checkSumXor((double[]) a);
+        }
+        fail("Unknown type of array: " + a.getClass().getName());
+        return -1;
+    }
+
+    private int checkSumXor(int[] a) {
+        int checkSum = 0;
+
+        for (int e : a) {
+            checkSum ^= e;
+        }
+        return checkSum;
+    }
+
+    private int checkSumXor(long[] a) {
+        long checkSum = 0;
+
+        for (long e : a) {
+            checkSum ^= e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumXor(byte[] a) {
+        byte checkSum = 0;
+
+        for (byte e : a) {
+            checkSum ^= e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumXor(char[] a) {
+        char checkSum = 0;
+
+        for (char e : a) {
+            checkSum ^= e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumXor(short[] a) {
+        short checkSum = 0;
+
+        for (short e : a) {
+            checkSum ^= e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumXor(float[] a) {
+        int checkSum = 0;
+
+        for (float e : a) {
+            checkSum ^= (int) e;
+        }
+        return checkSum;
+    }
+
+    private int checkSumXor(double[] a) {
+        int checkSum = 0;
+
+        for (double e : a) {
+            checkSum ^= (int) e;
+        }
+        return checkSum;
+    }
+
+    private int checkSumPlus(Object a) {
+        if (a instanceof int[]) {
+            return checkSumPlus((int[]) a);
+        }
+        if (a instanceof long[]) {
+            return checkSumPlus((long[]) a);
+        }
+        if (a instanceof byte[]) {
+            return checkSumPlus((byte[]) a);
+        }
+        if (a instanceof char[]) {
+            return checkSumPlus((char[]) a);
+        }
+        if (a instanceof short[]) {
+            return checkSumPlus((short[]) a);
+        }
+        if (a instanceof float[]) {
+            return checkSumPlus((float[]) a);
+        }
+        if (a instanceof double[]) {
+            return checkSumPlus((double[]) a);
+        }
+        fail("Unknown type of array: " + a.getClass().getName());
+        return -1;
+    }
+
+    private int checkSumPlus(int[] a) {
+        int checkSum = 0;
+
+        for (int e : a) {
+            checkSum += e;
+        }
+        return checkSum;
+    }
+
+    private int checkSumPlus(long[] a) {
+        long checkSum = 0;
+
+        for (long e : a) {
+            checkSum += e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumPlus(byte[] a) {
+        byte checkSum = 0;
+
+        for (byte e : a) {
+            checkSum += e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumPlus(char[] a) {
+        char checkSum = 0;
+
+        for (char e : a) {
+            checkSum += e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumPlus(short[] a) {
+        short checkSum = 0;
+
+        for (short e : a) {
+            checkSum += e;
+        }
+        return (int) checkSum;
+    }
+
+    private int checkSumPlus(float[] a) {
+        int checkSum = 0;
+
+        for (float e : a) {
+            checkSum += (int) e;
+        }
+        return checkSum;
+    }
+
+    private int checkSumPlus(double[] a) {
+        int checkSum = 0;
+
+        for (double e : a) {
+            checkSum += (int) e;
+        }
+        return checkSum;
+    }
+
+    private void sortByInsertionSort(Object a) {
+        if (a instanceof int[]) {
+            sortByInsertionSort((int[]) a);
+        } else if (a instanceof long[]) {
+            sortByInsertionSort((long[]) a);
+        } else if (a instanceof byte[]) {
+            sortByInsertionSort((byte[]) a);
+        } else if (a instanceof char[]) {
+            sortByInsertionSort((char[]) a);
+        } else if (a instanceof short[]) {
+            sortByInsertionSort((short[]) a);
+        } else if (a instanceof float[]) {
+            sortByInsertionSort((float[]) a);
+        } else if (a instanceof double[]) {
+            sortByInsertionSort((double[]) a);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
+        }
+    }
+
+    private void sortByInsertionSort(int[] a) {
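+        // Textbook insertion sort: shift greater elements right, drop ai in.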
+        for (int j, i = 1; i < a.length; i++) {
+            int ai = a[i];
+
+            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
+                a[j + 1] = a[j];
+            }
+            a[j + 1] = ai;
+        }
+    }
+
+    private void sortByInsertionSort(long[] a) {
+        for (int j, i = 1; i < a.length; i++) {
+            long ai = a[i];
+
+            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
+                a[j + 1] = a[j];
+            }
+            a[j + 1] = ai;
+        }
+    }
+
+    private void sortByInsertionSort(byte[] a) {
+        for (int j, i = 1; i < a.length; i++) {
+            byte ai = a[i];
+
+            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
+                a[j + 1] = a[j];
+            }
+            a[j + 1] = ai;
+        }
+    }
+
+    private void sortByInsertionSort(char[] a) {
+        for (int j, i = 1; i < a.length; i++) {
+            char ai = a[i];
+
+            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
+                a[j + 1] = a[j];
+            }
+            a[j + 1] = ai;
+        }
+    }
+
+    private void sortByInsertionSort(short[] a) {
+        for (int j, i = 1; i < a.length; i++) {
+            short ai = a[i];
+
+            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
+                a[j + 1] = a[j];
+            }
+            a[j + 1] = ai;
+        }
+    }
+
+    private void sortByInsertionSort(float[] a) {
+        for (int j, i = 1; i < a.length; i++) {
+            float ai = a[i];
+
+            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
+                a[j + 1] = a[j];
+            }
+            a[j + 1] = ai;
+        }
+    }
+
+    private void sortByInsertionSort(double[] a) {
+        for (int j, i = 1; i < a.length; i++) {
+            double ai = a[i];
+
+            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
+                a[j + 1] = a[j];
+            }
+            a[j + 1] = ai;
+        }
+    }
+
+    private void checkSubArray(Object a, int fromIndex, int toIndex) {
+        if (a instanceof int[]) {
+            checkSubArray((int[]) a, fromIndex, toIndex);
+        } else if (a instanceof long[]) {
+            checkSubArray((long[]) a, fromIndex, toIndex);
+        } else if (a instanceof byte[]) {
+            checkSubArray((byte[]) a, fromIndex, toIndex);
+        } else if (a instanceof char[]) {
+            checkSubArray((char[]) a, fromIndex, toIndex);
+        } else if (a instanceof short[]) {
+            checkSubArray((short[]) a, fromIndex, toIndex);
+        } else if (a instanceof float[]) {
+            checkSubArray((float[]) a, fromIndex, toIndex);
+        } else if (a instanceof double[]) {
+            checkSubArray((double[]) a, fromIndex, toIndex);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
+        }
+    }
+
+    private void checkSubArray(int[] a, int fromIndex, int toIndex) {
+        for (int i = 0; i < fromIndex; i++) {
+            if (a[i] != A380) {
+                fail("Range sort changes left element at position " + i + hex(a[i], A380));
+            }
+        }
+
+        for (int i = fromIndex; i < toIndex - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+
+        for (int i = toIndex; i < a.length; i++) {
+            if (a[i] != B747) {
+                fail("Range sort changes right element at position " + i + hex(a[i], B747));
+            }
+        }
+    }
+
+    private void checkSubArray(long[] a, int fromIndex, int toIndex) {
+        for (int i = 0; i < fromIndex; i++) {
+            if (a[i] != (long) A380) {
+                fail("Range sort changes left element at position " + i + hex(a[i], A380));
+            }
+        }
 
-                while (true) {
-                    for (int t = 1; t <= period; t++) {
-                        if (i >= a.length) {
-                            return;
-                        }
-                        a[i++] = k;
-                    }
-                    if (i >= a.length) {
-                        return;
-                    }
-                    k++;
+        for (int i = fromIndex; i < toIndex - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+
+        for (int i = toIndex; i < a.length; i++) {
+            if (a[i] != (long) B747) {
+                fail("Range sort changes right element at position " + i + hex(a[i], B747));
+            }
+        }
+    }
+
+    private void checkSubArray(byte[] a, int fromIndex, int toIndex) {
+        for (int i = 0; i < fromIndex; i++) {
+            if (a[i] != (byte) A380) {
+                fail("Range sort changes left element at position " + i + hex(a[i], A380));
+            }
+        }
+
+        for (int i = fromIndex; i < toIndex - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+
+        for (int i = toIndex; i < a.length; i++) {
+            if (a[i] != (byte) B747) {
+                fail("Range sort changes right element at position " + i + hex(a[i], B747));
+            }
+        }
+    }
+
+    private void checkSubArray(char[] a, int fromIndex, int toIndex) {
+        for (int i = 0; i < fromIndex; i++) {
+            if (a[i] != (char) A380) {
+                fail("Range sort changes left element at position " + i + hex(a[i], A380));
+            }
+        }
+
+        for (int i = fromIndex; i < toIndex - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+
+        for (int i = toIndex; i < a.length; i++) {
+            if (a[i] != (char) B747) {
+                fail("Range sort changes right element at position " + i + hex(a[i], B747));
+            }
+        }
+    }
+
+    private void checkSubArray(short[] a, int fromIndex, int toIndex) {
+        for (int i = 0; i < fromIndex; i++) {
+            if (a[i] != (short) A380) {
+                fail("Range sort changes left element at position " + i + hex(a[i], A380));
+            }
+        }
+
+        for (int i = fromIndex; i < toIndex - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+
+        for (int i = toIndex; i < a.length; i++) {
+            if (a[i] != (short) B747) {
+                fail("Range sort changes right element at position " + i + hex(a[i], B747));
+            }
+        }
+    }
+
+    private void checkSubArray(float[] a, int fromIndex, int toIndex) {
+        for (int i = 0; i < fromIndex; i++) {
+            if (a[i] != (float) A380) {
+                fail("Range sort changes left element at position " + i + hex((long) a[i], A380));
+            }
+        }
+
+        for (int i = fromIndex; i < toIndex - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+
+        for (int i = toIndex; i < a.length; i++) {
+            if (a[i] != (float) B747) {
+                fail("Range sort changes right element at position " + i + hex((long) a[i], B747));
+            }
+        }
+    }
+
+    private void checkSubArray(double[] a, int fromIndex, int toIndex) {
+        for (int i = 0; i < fromIndex; i++) {
+            if (a[i] != (double) A380) {
+                fail("Range sort changes left element at position " + i + hex((long) a[i], A380));
+            }
+        }
+
+        for (int i = fromIndex; i < toIndex - 1; i++) {
+            if (a[i] > a[i + 1]) {
+                fail("Array is not sorted at " + i + "-th position: " + a[i] + " and " + a[i + 1]);
+            }
+        }
+
+        for (int i = toIndex; i < a.length; i++) {
+            if (a[i] != (double) B747) {
+                fail("Range sort changes right element at position " + i + hex((long) a[i], B747));
+            }
+        }
+    }
+
+    private void checkRange(Object a, int m) {
+        if (a instanceof int[]) {
+            checkRange((int[]) a, m);
+        } else if (a instanceof long[]) {
+            checkRange((long[]) a, m);
+        } else if (a instanceof byte[]) {
+            checkRange((byte[]) a, m);
+        } else if (a instanceof char[]) {
+            checkRange((char[]) a, m);
+        } else if (a instanceof short[]) {
+            checkRange((short[]) a, m);
+        } else if (a instanceof float[]) {
+            checkRange((float[]) a, m);
+        } else if (a instanceof double[]) {
+            checkRange((double[]) a, m);
+        } else {
+            fail("Unknown type of array: " + a.getClass().getName());
+        }
+    }
+
+    private void checkRange(int[] a, int m) {
+        try {
+            sortingHelper.sort(a, m + 1, m);
+            fail(sortingHelper + " does not throw IllegalArgumentException " +
+                "as expected: fromIndex = " + (m + 1) + " toIndex = " + m);
+        } catch (IllegalArgumentException iae) {
+            try {
+                sortingHelper.sort(a, -m, a.length);
+                fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                    "as expected: fromIndex = " + (-m));
+            } catch (ArrayIndexOutOfBoundsException aoe) {
+                try {
+                    sortingHelper.sort(a, 0, a.length + m);
+                    fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                        "as expected: toIndex = " + (a.length + m));
+                } catch (ArrayIndexOutOfBoundsException expected) {}
+            }
+        }
+    }
+
+    private void checkRange(long[] a, int m) {
+        try {
+            sortingHelper.sort(a, m + 1, m);
+            fail(sortingHelper + " does not throw IllegalArgumentException " +
+                "as expected: fromIndex = " + (m + 1) + " toIndex = " + m);
+        } catch (IllegalArgumentException iae) {
+            try {
+                sortingHelper.sort(a, -m, a.length);
+                fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                    "as expected: fromIndex = " + (-m));
+            } catch (ArrayIndexOutOfBoundsException aoe) {
+                try {
+                    sortingHelper.sort(a, 0, a.length + m);
+                    fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                        "as expected: toIndex = " + (a.length + m));
+                } catch (ArrayIndexOutOfBoundsException expected) {}
+            }
+        }
+    }
+
+    private void checkRange(byte[] a, int m) {
+        try {
+            sortingHelper.sort(a, m + 1, m);
+            fail(sortingHelper + " does not throw IllegalArgumentException " +
+                "as expected: fromIndex = " + (m + 1) + " toIndex = " + m);
+        } catch (IllegalArgumentException iae) {
+            try {
+                sortingHelper.sort(a, -m, a.length);
+                fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                    "as expected: fromIndex = " + (-m));
+            } catch (ArrayIndexOutOfBoundsException aoe) {
+                try {
+                    sortingHelper.sort(a, 0, a.length + m);
+                    fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                        "as expected: toIndex = " + (a.length + m));
+                } catch (ArrayIndexOutOfBoundsException expected) {}
+            }
+        }
+    }
+
+    private void checkRange(char[] a, int m) {
+        try {
+            sortingHelper.sort(a, m + 1, m);
+            fail(sortingHelper + " does not throw IllegalArgumentException " +
+                "as expected: fromIndex = " + (m + 1) + " toIndex = " + m);
+        } catch (IllegalArgumentException iae) {
+            try {
+                sortingHelper.sort(a, -m, a.length);
+                fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                    "as expected: fromIndex = " + (-m));
+            } catch (ArrayIndexOutOfBoundsException aoe) {
+                try {
+                    sortingHelper.sort(a, 0, a.length + m);
+                    fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                        "as expected: toIndex = " + (a.length + m));
+                } catch (ArrayIndexOutOfBoundsException expected) {}
+            }
+        }
+    }
+
+    private void checkRange(short[] a, int m) {
+        try {
+            sortingHelper.sort(a, m + 1, m);
+            fail(sortingHelper + " does not throw IllegalArgumentException " +
+                "as expected: fromIndex = " + (m + 1) + " toIndex = " + m);
+        } catch (IllegalArgumentException iae) {
+            try {
+                sortingHelper.sort(a, -m, a.length);
+                fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                    "as expected: fromIndex = " + (-m));
+            } catch (ArrayIndexOutOfBoundsException aoe) {
+                try {
+                    sortingHelper.sort(a, 0, a.length + m);
+                    fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                        "as expected: toIndex = " + (a.length + m));
+                } catch (ArrayIndexOutOfBoundsException expected) {}
+            }
+        }
+    }
+
+    private void checkRange(float[] a, int m) {
+        try {
+            sortingHelper.sort(a, m + 1, m);
+            fail(sortingHelper + " does not throw IllegalArgumentException " +
+                "as expected: fromIndex = " + (m + 1) + " toIndex = " + m);
+        } catch (IllegalArgumentException iae) {
+            try {
+                sortingHelper.sort(a, -m, a.length);
+                fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                    "as expected: fromIndex = " + (-m));
+            } catch (ArrayIndexOutOfBoundsException aoe) {
+                try {
+                    sortingHelper.sort(a, 0, a.length + m);
+                    fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                        "as expected: toIndex = " + (a.length + m));
+                } catch (ArrayIndexOutOfBoundsException expected) {}
+            }
+        }
+    }
+
+    private void checkRange(double[] a, int m) {
+        try {
+            sortingHelper.sort(a, m + 1, m);
+            fail(sortingHelper + " does not throw IllegalArgumentException " +
+                "as expected: fromIndex = " + (m + 1) + " toIndex = " + m);
+        } catch (IllegalArgumentException iae) {
+            try {
+                sortingHelper.sort(a, -m, a.length);
+                fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                    "as expected: fromIndex = " + (-m));
+            } catch (ArrayIndexOutOfBoundsException aoe) {
+                try {
+                    sortingHelper.sort(a, 0, a.length + m);
+                    fail(sortingHelper + " does not throw ArrayIndexOutOfBoundsException " +
+                        "as expected: toIndex = " + (a.length + m));
+                } catch (ArrayIndexOutOfBoundsException expected) {}
+            }
+        }
+    }
+
+    private void copy(Object dst, Object src) {
+        if (src instanceof float[]) {
+            copy((float[]) dst, (float[]) src);
+        } else if (src instanceof double[]) {
+            copy((double[]) dst, (double[]) src);
+        } else {
+            fail("Unknown type of array: " + src.getClass().getName());
+        }
+    }
+
+    private void copy(float[] dst, float[] src) {
+        System.arraycopy(src, 0, dst, 0, src.length);
+    }
+
+    private void copy(double[] dst, double[] src) {
+        System.arraycopy(src, 0, dst, 0, src.length);
+    }
+
+    private void printTestName(String test, TestRandom random, int length) {
+        printTestName(test, random, length, "");
+    }
+
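+    // Allocate parallel "gold" (reference) and "test" arrays, one per
+    // primitive element type; gold[0] is the int[] master copy that seeds
+    // all other types in convertData().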
+    private void createData(int length) {
+        gold = new Object[] {
+            new int[length], new long[length],
+            new byte[length], new char[length], new short[length],
+            new float[length], new double[length]
+        };
+
+        test = new Object[] {
+            new int[length], new long[length],
+            new byte[length], new char[length], new short[length],
+            new float[length], new double[length]
+        };
+    }
+
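+    // Convert the int[] master copy into every other element type, then
+    // copy each gold array into its test counterpart.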
+    private void convertData(int length) {
+        for (int i = 1; i < gold.length; i++) {
+            TypeConverter converter = TypeConverter.values()[i - 1];
+            converter.convert((int[]) gold[0], gold[i]);
+        }
+
+        for (int i = 0; i < gold.length; i++) {
+            System.arraycopy(gold[i], 0, test[i], 0, length);
+        }
+    }
+
+    private String hex(long a, int b) {
+        return ": " + Long.toHexString(a) + ", must be " + Integer.toHexString(b);
+    }
+
+    private void printTestName(String test, TestRandom random, int length, String message) {
+        out.println("[" + sortingHelper + "] '" + test +
+            "' length = " + length + ", random = " + random + message);
+    }
+
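+    // Converters from the int[] golden data to the other element types,
+    // listed in the same order as the arrays created in createData().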
+    private static enum TypeConverter {
+        LONG {
+            void convert(int[] src, Object dst) {
+                long[] b = (long[]) dst;
+
+                for (int i = 0; i < src.length; i++) {
+                    b[i] = (long) src[i];
                 }
             }
         },
-        ORGAN_PIPES {
-            void build(int[] a, int m) {
-                int i = 0;
-                int k = m;
+
+        BYTE {
+            void convert(int[] src, Object dst) {
+                byte[] b = (byte[]) dst;
+
+                for (int i = 0; i < src.length; i++) {
+                    b[i] = (byte) src[i];
+                }
+            }
+        },
+
+        CHAR {
+            void convert(int[] src, Object dst) {
+                char[] b = (char[]) dst;
+
+                for (int i = 0; i < src.length; i++) {
+                    b[i] = (char) src[i];
+                }
+            }
+        },
+
+        SHORT {
+            void convert(int[] src, Object dst) {
+                short[] b = (short[]) dst;
+
+                for (int i = 0; i < src.length; i++) {
+                    b[i] = (short) src[i];
+                }
+            }
+        },
 
-                while (true) {
-                    for (int t = 1; t <= m; t++) {
-                        if (i >= a.length) {
-                            return;
-                        }
-                        a[i++] = k;
-                    }
+        FLOAT {
+            void convert(int[] src, Object dst) {
+                float[] b = (float[]) dst;
+
+                for (int i = 0; i < src.length; i++) {
+                    b[i] = (float) src[i];
+                }
+            }
+        },
+
+        DOUBLE {
+            void convert(int[] src, Object dst) {
+                double[] b = (double[]) dst;
+
+                for (int i = 0; i < src.length; i++) {
+                    b[i] = (double) src[i];
+                }
+            }
+        };
+
+        abstract void convert(int[] src, Object dst);
+    }
+
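+    // Builds an already sorted input: m zeros followed by ones, i.e. one
+    // ascending step.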
+    private static enum SortedBuilder {
+        STEPS {
+            void build(int[] a, int m) {
+                for (int i = 0; i < m; i++) {
+                    a[i] = 0;
+                }
+
+                for (int i = m; i < a.length; i++) {
+                    a[i] = 1;
                 }
             }
         };
 
         abstract void build(int[] a, int m);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 12; i++) {
-                name += " ";
-            }
-            return name;
-        }
-    }
-
-    private static enum MergeBuilder {
-        ASCENDING {
-            void build(int[] a, int m) {
-                int period = a.length / m;
-                int v = 1, i = 0;
-
-                for (int k = 0; k < m; k++) {
-                    v = 1;
-                    for (int p = 0; p < period; p++) {
-                        a[i++] = v++;
-                    }
-                }
-                for (int j = i; j < a.length - 1; j++) {
-                    a[j] = v++;
-                }
-                a[a.length - 1] = 0;
-            }
-        },
-        DESCENDING {
-            void build(int[] a, int m) {
-                int period = a.length / m;
-                int v = -1, i = 0;
-
-                for (int k = 0; k < m; k++) {
-                    v = -1;
-                    for (int p = 0; p < period; p++) {
-                        a[i++] = v--;
-                    }
-                }
-                for (int j = i; j < a.length - 1; j++) {
-                    a[j] = v--;
-                }
-                a[a.length - 1] = 0;
-            }
-        };
-
-        abstract void build(int[] a, int m);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 12; i++) {
-                name += " ";
-            }
-            return name;
-        }
     }
 
     private static enum UnsortedBuilder {
@@ -894,6 +1649,7 @@
                 }
             }
         },
+
         ASCENDING {
             void build(int[] a, int m, Random random) {
                 for (int i = 0; i < a.length; i++) {
@@ -901,6 +1657,7 @@
                 }
             }
         },
+
         DESCENDING {
             void build(int[] a, int m, Random random) {
                 for (int i = 0; i < a.length; i++) {
@@ -908,13 +1665,15 @@
                 }
             }
         },
-        ALL_EQUAL {
+
+        EQUAL {
             void build(int[] a, int m, Random random) {
                 for (int i = 0; i < a.length; i++) {
                     a[i] = m;
                 }
             }
         },
+
         SAW {
             void build(int[] a, int m, Random random) {
                 int incCount = 1;
@@ -941,6 +1700,7 @@
                 }
             }
         },
+
         REPEATED {
             void build(int[] a, int m, Random random) {
                 for (int i = 0; i < a.length; i++) {
@@ -948,6 +1708,7 @@
                 }
             }
         },
+
         DUPLICATED {
             void build(int[] a, int m, Random random) {
                 for (int i = 0; i < a.length; i++) {
@@ -955,6 +1716,7 @@
                 }
             }
         },
+
         ORGAN_PIPES {
             void build(int[] a, int m, Random random) {
                 int middle = a.length / (m + 1);
@@ -962,11 +1724,13 @@
                 for (int i = 0; i < middle; i++) {
                     a[i] = i;
                 }
+
                 for (int i = middle; i < a.length; i++) {
                     a[i] = a.length - i - 1;
                 }
             }
         },
+
         STAGGER {
             void build(int[] a, int m, Random random) {
                 for (int i = 0; i < a.length; i++) {
@@ -974,6 +1738,7 @@
                 }
             }
         },
+
         PLATEAU {
             void build(int[] a, int m, Random random) {
                 for (int i = 0; i < a.length; i++) {
@@ -981,1064 +1746,271 @@
                 }
             }
         },
+
         SHUFFLE {
             void build(int[] a, int m, Random random) {
                 int x = 0, y = 0;
+
                 for (int i = 0; i < a.length; i++) {
                     a[i] = random.nextBoolean() ? (x += 2) : (y += 2);
                 }
             }
+        },
+
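+        // Cycles through the values 0 .. max-1, where max = length / m,
+        // clamped to at least 2.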
+        LATCH {
+            void build(int[] a, int m, Random random) {
+                int max = a.length / m;
+                max = max < 2 ? 2 : max;
+
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = i % max;
+                }
+            }
         };
 
         abstract void build(int[] a, int m, Random random);
-
-        @Override public String toString() {
-            String name = name();
-
-            for (int i = name.length(); i < 12; i++) {
-                name += " ";
-            }
-            return name;
-        }
-    }
-
-    private static void checkWithCheckSum(Object test, Object golden) {
-        checkSorted(test);
-        checkCheckSum(test, golden);
-    }
-
-    private static void failed(String message) {
-        err.format("\n*** TEST FAILED - %s.\n\n%s.\n\n", ourDescription, message);
-        throw new RuntimeException("Test failed - see log file for details");
-    }
-
-    private static void failedSort(int index, String value1, String value2) {
-        failed("Array is not sorted at " + index + "-th position: " +
-            value1 + " and " + value2);
-    }
-
-    private static void failedCompare(int index, String value1, String value2) {
-        failed("On position " + index + " must be " + value2 + " instead of " + value1);
-    }
-
-    private static void compare(Object test, Object golden) {
-        if (test instanceof int[]) {
-            compare((int[]) test, (int[]) golden);
-        } else if (test instanceof long[]) {
-            compare((long[]) test, (long[]) golden);
-        } else if (test instanceof short[]) {
-            compare((short[]) test, (short[]) golden);
-        } else if (test instanceof byte[]) {
-            compare((byte[]) test, (byte[]) golden);
-        } else if (test instanceof char[]) {
-            compare((char[]) test, (char[]) golden);
-        } else if (test instanceof float[]) {
-            compare((float[]) test, (float[]) golden);
-        } else if (test instanceof double[]) {
-            compare((double[]) test, (double[]) golden);
-        } else if (test instanceof Integer[]) {
-            compare((Integer[]) test, (Integer[]) golden);
-        } else {
-            failed("Unknow type of array: " + test + " of class " +
-                test.getClass().getName());
-        }
-    }
-
-    private static void compare(int[] a, int[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(long[] a, long[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(short[] a, short[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(byte[] a, byte[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
-
-    private static void compare(char[] a, char[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
     }
 
-    private static void compare(float[] a, float[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
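+    // Shapes that stress the run-merging part of the sort: equal-length
+    // ascending or descending runs, a single spike (POINT), a descending
+    // line (LINE), an almost sorted line with its first two elements
+    // swapped (PEARL), and two descending slopes around a flat middle (RING).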
+    private static enum MergingBuilder {
+        ASCENDING {
+            void build(int[] a, int m) {
+                int period = a.length / m;
+                int v = 1, i = 0;
+
+                for (int k = 0; k < m; k++) {
+                    v = 1;
 
-    private static void compare(double[] a, double[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i] != b[i]) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
+                    for (int p = 0; p < period; p++) {
+                        a[i++] = v++;
+                    }
+                }
 
-    private static void compare(Integer[] a, Integer[] b) {
-        for (int i = 0; i < a.length; i++) {
-            if (a[i].compareTo(b[i]) != 0) {
-                failedCompare(i, "" + a[i], "" + b[i]);
-            }
-        }
-    }
+                for (int j = i; j < a.length - 1; j++) {
+                    a[j] = v++;
+                }
 
-    private static void checkSorted(Object object) {
-        if (object instanceof int[]) {
-            checkSorted((int[]) object);
-        } else if (object instanceof long[]) {
-            checkSorted((long[]) object);
-        } else if (object instanceof short[]) {
-            checkSorted((short[]) object);
-        } else if (object instanceof byte[]) {
-            checkSorted((byte[]) object);
-        } else if (object instanceof char[]) {
-            checkSorted((char[]) object);
-        } else if (object instanceof float[]) {
-            checkSorted((float[]) object);
-        } else if (object instanceof double[]) {
-            checkSorted((double[]) object);
-        } else if (object instanceof Integer[]) {
-            checkSorted((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
+                a[a.length - 1] = 0;
+            }
+        },
+
+        DESCENDING {
+            void build(int[] a, int m) {
+                int period = a.length / m;
+                int v = -1, i = 0;
 
-    private static void checkSorted(int[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
+                for (int k = 0; k < m; k++) {
+                    v = -1;
+
+                    for (int p = 0; p < period; p++) {
+                        a[i++] = v--;
+                    }
+                }
+
+                for (int j = i; j < a.length - 1; j++) {
+                    a[j] = v--;
+                }
+
+                a[a.length - 1] = 0;
             }
-        }
-    }
-
-    private static void checkSorted(long[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
+        },
 
-    private static void checkSorted(short[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
+        POINT {
+            void build(int[] a, int m) {
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = 0;
+                }
+                a[a.length / 2] = m;
             }
-        }
-    }
+        },
 
-    private static void checkSorted(byte[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
+        LINE {
+            void build(int[] a, int m) {
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = i;
+                }
+                reverse(a, 0, a.length - 1);
             }
-        }
-    }
+        },
 
-    private static void checkSorted(char[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-    }
-
-    private static void checkSorted(float[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
+        PEARL {
+            void build(int[] a, int m) {
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = i;
+                }
+                reverse(a, 0, 2);
             }
-        }
-    }
+        },
+
+        RING {
+            void build(int[] a, int m) {
+                int k1 = a.length / 3;
+                int k2 = a.length / 3 * 2;
+                int level = a.length / 3;
 
-    private static void checkSorted(double[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
+                for (int i = 0, k = level; i < k1; i++) {
+                    a[i] = k--;
+                }
+
+                for (int i = k1; i < k2; i++) {
+                    a[i] = 0;
+                }
+
+                for (int i = k2, k = level; i < a.length; i++) {
+                    a[i] = k--;
+                }
             }
-        }
-    }
+        };
+
+        abstract void build(int[] a, int m);
 
-    private static void checkSorted(Integer[] a) {
-        for (int i = 0; i < a.length - 1; i++) {
-            if (a[i].intValue() > a[i + 1].intValue()) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
+        private static void reverse(int[] a, int lo, int hi) {
+            for (--hi; lo < hi; ) {
+                int tmp = a[lo];
+                a[lo++] = a[hi];
+                a[hi--] = tmp;
             }
         }
     }
 
-    private static void checkCheckSum(Object test, Object golden) {
-        if (checkSumXor(test) != checkSumXor(golden)) {
-            failed("Original and sorted arrays are not identical [xor]");
-        }
-        if (checkSumPlus(test) != checkSumPlus(golden)) {
-            failed("Original and sorted arrays are not identical [plus]");
-        }
-    }
-
-    private static int checkSumXor(Object object) {
-        if (object instanceof int[]) {
-            return checkSumXor((int[]) object);
-        } else if (object instanceof long[]) {
-            return checkSumXor((long[]) object);
-        } else if (object instanceof short[]) {
-            return checkSumXor((short[]) object);
-        } else if (object instanceof byte[]) {
-            return checkSumXor((byte[]) object);
-        } else if (object instanceof char[]) {
-            return checkSumXor((char[]) object);
-        } else if (object instanceof float[]) {
-            return checkSumXor((float[]) object);
-        } else if (object instanceof double[]) {
-            return checkSumXor((double[]) object);
-        } else if (object instanceof Integer[]) {
-            return checkSumXor((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-            return -1;
-        }
-    }
-
-    private static int checkSumXor(Integer[] a) {
-        int checkSum = 0;
-
-        for (Integer e : a) {
-            checkSum ^= e.intValue();
-        }
-        return checkSum;
-    }
-
-    private static int checkSumXor(int[] a) {
-        int checkSum = 0;
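+    // Fills a float[] or double[] with a random mix of -0.0 and 0.0;
+    // Arrays.sort must order every -0.0 before 0.0.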
+    private static enum NegativeZeroBuilder {
+        FLOAT {
+            void build(Object o, Random random) {
+                float[] a = (float[]) o;
 
-        for (int e : a) {
-            checkSum ^= e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumXor(long[] a) {
-        long checkSum = 0;
-
-        for (long e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(short[] a) {
-        short checkSum = 0;
-
-        for (short e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(byte[] a) {
-        byte checkSum = 0;
-
-        for (byte e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(char[] a) {
-        char checkSum = 0;
-
-        for (char e : a) {
-            checkSum ^= e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumXor(float[] a) {
-        int checkSum = 0;
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = random.nextBoolean() ? -0.0f : 0.0f;
+                }
+            }
+        },
 
-        for (float e : a) {
-            checkSum ^= (int) e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumXor(double[] a) {
-        int checkSum = 0;
-
-        for (double e : a) {
-            checkSum ^= (int) e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumPlus(Object object) {
-        if (object instanceof int[]) {
-            return checkSumPlus((int[]) object);
-        } else if (object instanceof long[]) {
-            return checkSumPlus((long[]) object);
-        } else if (object instanceof short[]) {
-            return checkSumPlus((short[]) object);
-        } else if (object instanceof byte[]) {
-            return checkSumPlus((byte[]) object);
-        } else if (object instanceof char[]) {
-            return checkSumPlus((char[]) object);
-        } else if (object instanceof float[]) {
-            return checkSumPlus((float[]) object);
-        } else if (object instanceof double[]) {
-            return checkSumPlus((double[]) object);
-        } else if (object instanceof Integer[]) {
-            return checkSumPlus((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-            return -1;
-        }
-    }
-
-    private static int checkSumPlus(int[] a) {
-        int checkSum = 0;
+        DOUBLE {
+            void build(Object o, Random random) {
+                double[] a = (double[]) o;
 
-        for (int e : a) {
-            checkSum += e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumPlus(long[] a) {
-        long checkSum = 0;
-
-        for (long e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumPlus(short[] a) {
-        short checkSum = 0;
-
-        for (short e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
+                for (int i = 0; i < a.length; i++) {
+                    a[i] = random.nextBoolean() ? -0.0d : 0.0d;
+                }
+            }
+        };
 
-    private static int checkSumPlus(byte[] a) {
-        byte checkSum = 0;
-
-        for (byte e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumPlus(char[] a) {
-        char checkSum = 0;
-
-        for (char e : a) {
-            checkSum += e;
-        }
-        return (int) checkSum;
-    }
-
-    private static int checkSumPlus(float[] a) {
-        int checkSum = 0;
-
-        for (float e : a) {
-            checkSum += (int) e;
-        }
-        return checkSum;
+        abstract void build(Object o, Random random);
     }
 
-    private static int checkSumPlus(double[] a) {
-        int checkSum = 0;
-
-        for (double e : a) {
-            checkSum += (int) e;
-        }
-        return checkSum;
-    }
-
-    private static int checkSumPlus(Integer[] a) {
-        int checkSum = 0;
-
-        for (Integer e : a) {
-            checkSum += e.intValue();
-        }
-        return checkSum;
-    }
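+    // Lays out a float[] or double[] as n negative values, g negative
+    // zeros, z positive zeros, p positive values and a NaNs, in that
+    // order, to exercise the special handling of NaN and -0.0.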
+    private static enum FloatingPointBuilder {
+        FLOAT {
+            void build(Object o, int a, int g, int z, int n, int p, Random random) {
+                float negativeValue = -random.nextFloat();
+                float positiveValue =  random.nextFloat();
+                float[] x = (float[]) o;
+                int fromIndex = 0;
 
-    private static void sortByInsertionSort(Object object) {
-        if (object instanceof int[]) {
-            sortByInsertionSort((int[]) object);
-        } else if (object instanceof long[]) {
-            sortByInsertionSort((long[]) object);
-        } else if (object instanceof short[]) {
-            sortByInsertionSort((short[]) object);
-        } else if (object instanceof byte[]) {
-            sortByInsertionSort((byte[]) object);
-        } else if (object instanceof char[]) {
-            sortByInsertionSort((char[]) object);
-        } else if (object instanceof float[]) {
-            sortByInsertionSort((float[]) object);
-        } else if (object instanceof double[]) {
-            sortByInsertionSort((double[]) object);
-        } else if (object instanceof Integer[]) {
-            sortByInsertionSort((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
+                writeValue(x, negativeValue, fromIndex, n);
+                fromIndex += n;
 
-    private static void sortByInsertionSort(int[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            int ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(long[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            long ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
+                writeValue(x, -0.0f, fromIndex, g);
+                fromIndex += g;
 
-    private static void sortByInsertionSort(short[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            short ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
+                writeValue(x, 0.0f, fromIndex, z);
+                fromIndex += z;
 
-    private static void sortByInsertionSort(byte[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            byte ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
+                writeValue(x, positiveValue, fromIndex, p);
+                fromIndex += p;
 
-    private static void sortByInsertionSort(char[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            char ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(float[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            float ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
+                writeValue(x, Float.NaN, fromIndex, a);
             }
-            a[j + 1] = ai;
-        }
-    }
-
-    private static void sortByInsertionSort(double[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            double ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
+        },
 
-    private static void sortByInsertionSort(Integer[] a) {
-        for (int j, i = 1; i < a.length; i++) {
-            Integer ai = a[i];
-            for (j = i - 1; j >= 0 && ai < a[j]; j--) {
-                a[j + 1] = a[j];
-            }
-            a[j + 1] = ai;
-        }
-    }
+        DOUBLE {
+            void build(Object o, int a, int g, int z, int n, int p, Random random) {
+                double negativeValue = -random.nextDouble();
+                double positiveValue =  random.nextDouble();
+                double[] x = (double[]) o;
+                int fromIndex = 0;
 
-    private static void sort(Object object) {
-        if (object instanceof int[]) {
-            Arrays.sort((int[]) object);
-        } else if (object instanceof long[]) {
-            Arrays.sort((long[]) object);
-        } else if (object instanceof short[]) {
-            Arrays.sort((short[]) object);
-        } else if (object instanceof byte[]) {
-            Arrays.sort((byte[]) object);
-        } else if (object instanceof char[]) {
-            Arrays.sort((char[]) object);
-        } else if (object instanceof float[]) {
-            Arrays.sort((float[]) object);
-        } else if (object instanceof double[]) {
-            Arrays.sort((double[]) object);
-        } else if (object instanceof Integer[]) {
-            Arrays.sort((Integer[]) object);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
+                writeValue(x, negativeValue, fromIndex, n);
+                fromIndex += n;
+
+                writeValue(x, -0.0d, fromIndex, g);
+                fromIndex += g;
 
-    private static void sortSubArray(Object object, int fromIndex, int toIndex) {
-        if (object instanceof int[]) {
-            Arrays.sort((int[]) object, fromIndex, toIndex);
-        } else if (object instanceof long[]) {
-            Arrays.sort((long[]) object, fromIndex, toIndex);
-        } else if (object instanceof short[]) {
-            Arrays.sort((short[]) object, fromIndex, toIndex);
-        } else if (object instanceof byte[]) {
-            Arrays.sort((byte[]) object, fromIndex, toIndex);
-        } else if (object instanceof char[]) {
-            Arrays.sort((char[]) object, fromIndex, toIndex);
-        } else if (object instanceof float[]) {
-            Arrays.sort((float[]) object, fromIndex, toIndex);
-        } else if (object instanceof double[]) {
-            Arrays.sort((double[]) object, fromIndex, toIndex);
-        } else if (object instanceof Integer[]) {
-            Arrays.sort((Integer[]) object, fromIndex, toIndex);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
+                writeValue(x, 0.0d, fromIndex, z);
+                fromIndex += z;
+
+                writeValue(x, positiveValue, fromIndex, p);
+                fromIndex += p;
 
-    private static void checkSubArray(Object object, int fromIndex, int toIndex, int m) {
-        if (object instanceof int[]) {
-            checkSubArray((int[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof long[]) {
-            checkSubArray((long[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof short[]) {
-            checkSubArray((short[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof byte[]) {
-            checkSubArray((byte[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof char[]) {
-            checkSubArray((char[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof float[]) {
-            checkSubArray((float[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof double[]) {
-            checkSubArray((double[]) object, fromIndex, toIndex, m);
-        } else if (object instanceof Integer[]) {
-            checkSubArray((Integer[]) object, fromIndex, toIndex, m);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
+                writeValue(x, Double.NaN, fromIndex, a);
+            }
+        };
 
-    private static void checkSubArray(Integer[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i].intValue() != 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
+        abstract void build(Object o, int a, int g, int z, int n, int p, Random random);
+
+        private static void writeValue(float[] a, float value, int fromIndex, int count) {
+            for (int i = fromIndex; i < fromIndex + count; i++) {
+                a[i] = value;
             }
         }
 
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i].intValue() > a[i + 1].intValue()) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i].intValue() != 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(int[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
+        private static void writeValue(double[] a, double value, int fromIndex, int count) {
+            for (int i = fromIndex; i < fromIndex + count; i++) {
+                a[i] = value;
             }
         }
     }
 
-    private static void checkSubArray(byte[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (byte) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (byte) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(long[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (long) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (long) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
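+    // Comparator equivalent of Pair's natural ordering: it compares keys
+    // only, so the untouched value field can reveal whether equal keys
+    // keep their relative order (stability).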
+    private static Comparator<Pair> pairComparator = new Comparator<Pair>() {
 
-    private static void checkSubArray(char[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (char) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
+        @Override
+        public int compare(Pair p1, Pair p2) {
+            return p1.compareTo(p2);
         }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
-
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (char) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
+    };
 
-    private static void checkSubArray(short[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (short) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
-        }
+    private static class Pair implements Comparable<Pair> {
 
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (short) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(float[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (float) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
-        }
-
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
+        private Pair(int key, int value) {
+            this.key = key;
+            this.value = value;
         }
 
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (float) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
-        }
-    }
-
-    private static void checkSubArray(double[] a, int fromIndex, int toIndex, int m) {
-        for (int i = 0; i < fromIndex; i++) {
-            if (a[i] != (double) 0xDEDA) {
-                failed("Range sort changes left element on position " + i +
-                    ": " + a[i] + ", must be " + 0xDEDA);
-            }
+        int getKey() {
+            return key;
         }
 
-        for (int i = fromIndex; i < toIndex - 1; i++) {
-            if (a[i] > a[i + 1]) {
-                failedSort(i, "" + a[i], "" + a[i + 1]);
-            }
+        int getValue() {
+            return value;
         }
 
-        for (int i = toIndex; i < a.length; i++) {
-            if (a[i] != (double) 0xBABA) {
-                failed("Range sort changes right element on position " + i +
-                    ": " + a[i] + ", must be " + 0xBABA);
-            }
+        @Override
+        public int compareTo(Pair pair) {
+            return Integer.compare(key, pair.key);
         }
-    }
-
-    private static void checkRange(Object object, int m) {
-        if (object instanceof int[]) {
-            checkRange((int[]) object, m);
-        } else if (object instanceof long[]) {
-            checkRange((long[]) object, m);
-        } else if (object instanceof short[]) {
-            checkRange((short[]) object, m);
-        } else if (object instanceof byte[]) {
-            checkRange((byte[]) object, m);
-        } else if (object instanceof char[]) {
-            checkRange((char[]) object, m);
-        } else if (object instanceof float[]) {
-            checkRange((float[]) object, m);
-        } else if (object instanceof double[]) {
-            checkRange((double[]) object, m);
-        } else if (object instanceof Integer[]) {
-            checkRange((Integer[]) object, m);
-        } else {
-            failed("Unknow type of array: " + object + " of class " +
-                object.getClass().getName());
-        }
-    }
-
-    private static void checkRange(Integer[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
-
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
 
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
+        @Override
+        public String toString() {
+            return "(" + key + ", " + value + ")";
         }
-    }
-
-    private static void checkRange(int[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
 
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
-
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
+        private int key;
+        private int value;
     }
 
-    private static void checkRange(long[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
-
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
-
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(byte[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
-
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
-
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(short[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
-
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
-
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(char[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
-
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
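+    // Random with a fixed seed that is kept as an upper-case hex string,
+    // so the seed can be printed in test names and failures reproduced.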
+    private static class TestRandom extends Random {
 
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(float[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
-
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
-
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void checkRange(double[] a, int m) {
-        try {
-            Arrays.sort(a, m + 1, m);
+        private static final TestRandom BABA = new TestRandom(0xBABA);
+        private static final TestRandom DEDA = new TestRandom(0xDEDA);
+        private static final TestRandom C0FFEE = new TestRandom(0xC0FFEE);
 
-            failed("Sort does not throw IllegalArgumentException " +
-                " as expected: fromIndex = " + (m + 1) +
-                " toIndex = " + m);
-        }
-        catch (IllegalArgumentException iae) {
-            try {
-                Arrays.sort(a, -m, a.length);
-
-                failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                    " as expected: fromIndex = " + (-m));
-            }
-            catch (ArrayIndexOutOfBoundsException aoe) {
-                try {
-                    Arrays.sort(a, 0, a.length + m);
-
-                    failed("Sort does not throw ArrayIndexOutOfBoundsException " +
-                        " as expected: toIndex = " + (a.length + m));
-                }
-                catch (ArrayIndexOutOfBoundsException aie) {
-                    return;
-                }
-            }
-        }
-    }
-
-    private static void outArray(Object[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static void outArray(int[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static void outArray(float[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static void outArray(double[] a) {
-        for (int i = 0; i < a.length; i++) {
-            out.print(a[i] + " ");
-        }
-        out.println();
-    }
-
-    private static class MyRandom extends Random {
-        MyRandom(long seed) {
+        private TestRandom(long seed) {
             super(seed);
-            mySeed = seed;
+            this.seed = Long.toHexString(seed).toUpperCase();
         }
 
-        long getSeed() {
-            return mySeed;
+        @Override
+        public String toString() {
+            return seed;
         }
 
-        private long mySeed;
+        private String seed;
     }
-
-    private static String ourDescription;
 }
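
The TestRandom rewrite above drops MyRandom's getSeed() accessor in favor of a toString() that renders the seed as uppercase hex, so a failing run's log names the exact seed to replay. A minimal sketch of the reproducibility a fixed seed buys (the demo class and names are illustrative, not part of the test):

    import java.util.Random;

    public class SeedReplayDemo {
        public static void main(String[] args) {
            long seed = 0xC0FFEE; // seed reported by a failing run
            Random original = new Random(seed);
            Random replay = new Random(seed);
            // java.util.Random is a deterministic PRNG: the same seed
            // always produces the same sequence, so a failure replays exactly.
            for (int i = 0; i < 5; i++) {
                if (original.nextInt() != replay.nextInt()) {
                    throw new AssertionError("sequences diverged");
                }
            }
            System.out.println("identical sequences for seed "
                + Long.toHexString(seed).toUpperCase());
        }
    }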
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/java/util/Arrays/java.base/java/util/SortingHelper.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.  Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package java.util;
+
+/**
+ * This class provides access to package-private
+ * methods of the DualPivotQuicksort class.
+ *
+ * @author Vladimir Yaroslavskiy
+ *
+ * @version 2019.09.19
+ *
+ * @since 14
+ */
+public enum SortingHelper {
+
+    DUAL_PIVOT_QUICKSORT("Dual-Pivot Quicksort") {
+
+        @Override
+        public void sort(Object a) {
+            if (a instanceof int[]) {
+                DualPivotQuicksort.sort((int[]) a, SEQUENTIAL, 0, ((int[]) a).length);
+            } else if (a instanceof long[]) {
+                DualPivotQuicksort.sort((long[]) a, SEQUENTIAL, 0, ((long[]) a).length);
+            } else if (a instanceof byte[]) {
+                DualPivotQuicksort.sort((byte[]) a, 0, ((byte[]) a).length);
+            } else if (a instanceof char[]) {
+                DualPivotQuicksort.sort((char[]) a, SEQUENTIAL, 0, ((char[]) a).length);
+            } else if (a instanceof short[]) {
+                DualPivotQuicksort.sort((short[]) a, SEQUENTIAL, 0, ((short[]) a).length);
+            } else if (a instanceof float[]) {
+                DualPivotQuicksort.sort((float[]) a, SEQUENTIAL, 0, ((float[]) a).length);
+            } else if (a instanceof double[]) {
+                DualPivotQuicksort.sort((double[]) a, SEQUENTIAL, 0, ((double[]) a).length);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object a, int low, int high) {
+            if (a instanceof int[]) {
+                DualPivotQuicksort.sort((int[]) a, SEQUENTIAL, low, high);
+            } else if (a instanceof long[]) {
+                DualPivotQuicksort.sort((long[]) a, SEQUENTIAL, low, high);
+            } else if (a instanceof byte[]) {
+                DualPivotQuicksort.sort((byte[]) a, low, high);
+            } else if (a instanceof char[]) {
+                DualPivotQuicksort.sort((char[]) a, SEQUENTIAL, low, high);
+            } else if (a instanceof short[]) {
+                DualPivotQuicksort.sort((short[]) a, SEQUENTIAL, low, high);
+            } else if (a instanceof float[]) {
+                DualPivotQuicksort.sort((float[]) a, SEQUENTIAL, low, high);
+            } else if (a instanceof double[]) {
+                DualPivotQuicksort.sort((double[]) a, SEQUENTIAL, low, high);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object[] a) {
+            fail(a);
+        }
+
+        @Override
+        public void sort(Object[] a, Comparator comparator) {
+            fail(a);
+        }
+    },
+
+    PARALLEL_SORT("Parallel sort") {
+
+        @Override
+        public void sort(Object a) {
+            if (a instanceof int[]) {
+                DualPivotQuicksort.sort((int[]) a, PARALLEL, 0, ((int[]) a).length);
+            } else if (a instanceof long[]) {
+                DualPivotQuicksort.sort((long[]) a, PARALLEL, 0, ((long[]) a).length);
+            } else if (a instanceof byte[]) {
+                DualPivotQuicksort.sort((byte[]) a, 0, ((byte[]) a).length);
+            } else if (a instanceof char[]) {
+                DualPivotQuicksort.sort((char[]) a, PARALLEL, 0, ((char[]) a).length);
+            } else if (a instanceof short[]) {
+                DualPivotQuicksort.sort((short[]) a, PARALLEL, 0, ((short[]) a).length);
+            } else if (a instanceof float[]) {
+                DualPivotQuicksort.sort((float[]) a, PARALLEL, 0, ((float[]) a).length);
+            } else if (a instanceof double[]) {
+                DualPivotQuicksort.sort((double[]) a, PARALLEL, 0, ((double[]) a).length);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object a, int low, int high) {
+            if (a instanceof int[]) {
+                DualPivotQuicksort.sort((int[]) a, PARALLEL, low, high);
+            } else if (a instanceof long[]) {
+                DualPivotQuicksort.sort((long[]) a, PARALLEL, low, high);
+            } else if (a instanceof byte[]) {
+                DualPivotQuicksort.sort((byte[]) a, low, high);
+            } else if (a instanceof char[]) {
+                DualPivotQuicksort.sort((char[]) a, PARALLEL, low, high);
+            } else if (a instanceof short[]) {
+                DualPivotQuicksort.sort((short[]) a, PARALLEL, low, high);
+            } else if (a instanceof float[]) {
+                DualPivotQuicksort.sort((float[]) a, PARALLEL, low, high);
+            } else if (a instanceof double[]) {
+                DualPivotQuicksort.sort((double[]) a, PARALLEL, low, high);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object[] a) {
+            fail(a);
+        }
+
+        @Override
+        public void sort(Object[] a, Comparator comparator) {
+            fail(a);
+        }
+    },
+
+    HEAP_SORT("Heap sort") {
+
+        @Override
+        public void sort(Object a) {
+            if (a instanceof int[]) {
+                DualPivotQuicksort.sort(null, (int[]) a, BIG_DEPTH, 0, ((int[]) a).length);
+            } else if (a instanceof long[]) {
+                DualPivotQuicksort.sort(null, (long[]) a, BIG_DEPTH, 0, ((long[]) a).length);
+            } else if (a instanceof byte[]) {
+                DualPivotQuicksort.sort((byte[]) a, 0, ((byte[]) a).length);
+            } else if (a instanceof char[]) {
+                DualPivotQuicksort.sort((char[]) a, BIG_DEPTH, 0, ((char[]) a).length);
+            } else if (a instanceof short[]) {
+                DualPivotQuicksort.sort((short[]) a, BIG_DEPTH, 0, ((short[]) a).length);
+            } else if (a instanceof float[]) {
+                DualPivotQuicksort.sort(null, (float[]) a, BIG_DEPTH, 0, ((float[]) a).length);
+            } else if (a instanceof double[]) {
+                DualPivotQuicksort.sort(null, (double[]) a, BIG_DEPTH, 0, ((double[]) a).length);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object a, int low, int high) {
+            if (a instanceof int[]) {
+                DualPivotQuicksort.sort(null, (int[]) a, BIG_DEPTH, low, high);
+            } else if (a instanceof long[]) {
+                DualPivotQuicksort.sort(null, (long[]) a, BIG_DEPTH, low, high);
+            } else if (a instanceof byte[]) {
+                DualPivotQuicksort.sort((byte[]) a, low, high);
+            } else if (a instanceof char[]) {
+                DualPivotQuicksort.sort((char[]) a, BIG_DEPTH, low, high);
+            } else if (a instanceof short[]) {
+                DualPivotQuicksort.sort((short[]) a, BIG_DEPTH, low, high);
+            } else if (a instanceof float[]) {
+                DualPivotQuicksort.sort(null, (float[]) a, BIG_DEPTH, low, high);
+            } else if (a instanceof double[]) {
+                DualPivotQuicksort.sort(null, (double[]) a, BIG_DEPTH, low, high);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object[] a) {
+            fail(a);
+        }
+
+        @Override
+        public void sort(Object[] a, Comparator comparator) {
+            fail(a);
+        }
+    },
+
+    ARRAYS_SORT("Arrays.sort") {
+
+        @Override
+        public void sort(Object a) {
+            if (a instanceof int[]) {
+                Arrays.sort((int[]) a);
+            } else if (a instanceof long[]) {
+                Arrays.sort((long[]) a);
+            } else if (a instanceof byte[]) {
+                Arrays.sort((byte[]) a);
+            } else if (a instanceof char[]) {
+                Arrays.sort((char[]) a);
+            } else if (a instanceof short[]) {
+                Arrays.sort((short[]) a);
+            } else if (a instanceof float[]) {
+                Arrays.sort((float[]) a);
+            } else if (a instanceof double[]) {
+                Arrays.sort((double[]) a);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object a, int low, int high) {
+            if (a instanceof int[]) {
+                Arrays.sort((int[]) a, low, high);
+            } else if (a instanceof long[]) {
+                Arrays.sort((long[]) a, low, high);
+            } else if (a instanceof byte[]) {
+                Arrays.sort((byte[]) a, low, high);
+            } else if (a instanceof char[]) {
+                Arrays.sort((char[]) a, low, high);
+            } else if (a instanceof short[]) {
+                Arrays.sort((short[]) a, low, high);
+            } else if (a instanceof float[]) {
+                Arrays.sort((float[]) a, low, high);
+            } else if (a instanceof double[]) {
+                Arrays.sort((double[]) a, low, high);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object[] a) {
+            Arrays.sort(a);
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public void sort(Object[] a, Comparator comparator) {
+            Arrays.sort(a, comparator);
+        }
+    },
+
+    ARRAYS_PARALLEL_SORT("Arrays.parallelSort") {
+
+        @Override
+        public void sort(Object a) {
+            if (a instanceof int[]) {
+                Arrays.parallelSort((int[]) a);
+            } else if (a instanceof long[]) {
+                Arrays.parallelSort((long[]) a);
+            } else if (a instanceof byte[]) {
+                Arrays.parallelSort((byte[]) a);
+            } else if (a instanceof char[]) {
+                Arrays.parallelSort((char[]) a);
+            } else if (a instanceof short[]) {
+                Arrays.parallelSort((short[]) a);
+            } else if (a instanceof float[]) {
+                Arrays.parallelSort((float[]) a);
+            } else if (a instanceof double[]) {
+                Arrays.parallelSort((double[]) a);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        public void sort(Object a, int low, int high) {
+            if (a instanceof int[]) {
+                Arrays.parallelSort((int[]) a, low, high);
+            } else if (a instanceof long[]) {
+                Arrays.parallelSort((long[]) a, low, high);
+            } else if (a instanceof byte[]) {
+                Arrays.parallelSort((byte[]) a, low, high);
+            } else if (a instanceof char[]) {
+                Arrays.parallelSort((char[]) a, low, high);
+            } else if (a instanceof short[]) {
+                Arrays.parallelSort((short[]) a, low, high);
+            } else if (a instanceof float[]) {
+                Arrays.parallelSort((float[]) a, low, high);
+            } else if (a instanceof double[]) {
+                Arrays.parallelSort((double[]) a, low, high);
+            } else {
+                fail(a);
+            }
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public void sort(Object[] a) {
+            Arrays.parallelSort((Comparable[]) a);
+        }
+
+        @Override
+        @SuppressWarnings("unchecked")
+        public void sort(Object[] a, Comparator comparator) {
+            Arrays.parallelSort(a, comparator);
+        }
+    };
+
+    public abstract void sort(Object a);
+
+    public abstract void sort(Object a, int low, int high);
+
+    public abstract void sort(Object[] a);
+
+    public abstract void sort(Object[] a, Comparator comparator);
+
+    private SortingHelper(String name) {
+        this.name = name;
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+
+    private static void fail(Object a) {
+        throw new RuntimeException("Unexpected type of array: " + a.getClass().getName());
+    }
+
+    private String name;
+
+    /**
+     * Parallelism levels for sequential and parallel sorting.
+     */
+    private static final int SEQUENTIAL = 0;
+    private static final int PARALLEL = 87;
+
+    /**
+     * Heap sort will be invoked if the recursion depth is too big.
+     * Value is taken from DualPivotQuicksort.MAX_RECURSION_DEPTH.
+     */
+    private static final int BIG_DEPTH = 64 * (3 << 1);
+}
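
For orientation: each constant of SortingHelper above funnels a primitive array through a different entry point (sequential and parallel DualPivotQuicksort, the heap-sort fallback via BIG_DEPTH, and the public Arrays.sort/parallelSort), so one test loop can exercise all five strategies through identical call sites. A minimal sketch of a caller, assuming the helper is compiled into java.base via the test's java.base patch directory (the demo class itself is hypothetical):

    import java.util.Arrays;
    import java.util.SortingHelper;

    public class SortingHelperDemo {
        public static void main(String[] args) {
            int[] expected = {1, 2, 3, 5, 8};
            for (SortingHelper helper : SortingHelper.values()) {
                int[] a = {5, 3, 8, 1, 2};
                helper.sort(a); // dispatches on the runtime array type
                if (!Arrays.equals(a, expected)) {
                    throw new AssertionError("sorting failed for " + helper);
                }
            }
            System.out.println("all sorting strategies agree");
        }
    }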
--- a/test/jdk/java/util/Locale/LocaleProviders.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/util/Locale/LocaleProviders.java	Thu Nov 14 13:50:03 2019 +0000
@@ -24,6 +24,7 @@
 import java.text.spi.*;
 import java.util.*;
 import java.util.spi.*;
+import java.util.stream.IntStream;
 import sun.util.locale.provider.LocaleProviderAdapter;
 
 public class LocaleProviders {
@@ -87,6 +88,10 @@
                 bug8232871Test();
                 break;
 
+            case "bug8232860Test":
+                bug8232860Test();
+                break;
+
             default:
                 throw new RuntimeException("Test method '"+methodName+"' not found.");
         }
@@ -327,4 +332,42 @@
                 "native calendar is not JapaneseCalendar: " + calType);
         }
     }
+
+    static void bug8232860Test() {
+        var inputList = List.of(123, 123.4);
+        var nfExpectedList = List.of("123", "123.4");
+        var ifExpectedList = List.of("123", "123");
+
+        var type = LocaleProviderAdapter.getAdapter(CalendarNameProvider.class, Locale.US)
+                                        .getAdapterType();
+        if (type == LocaleProviderAdapter.Type.HOST && (IS_WINDOWS || IS_MAC)) {
+            final var numf = NumberFormat.getNumberInstance(Locale.US);
+            final var intf = NumberFormat.getIntegerInstance(Locale.US);
+
+            IntStream.range(0, inputList.size())
+                .forEach(i -> {
+                    var input = inputList.get(i);
+                    var nfExpected = nfExpectedList.get(i);
+                    var result = numf.format(input);
+                    if (!result.equals(nfExpected)) {
+                        throw new RuntimeException("Incorrect number format. " +
+                            "input: " + input + ", expected: " +
+                            nfExpected + ", result: " + result);
+                    }
+
+                    var ifExpected = ifExpectedList.get(i);
+                    result = intf.format(input);
+                    if (!result.equals(ifExpected)) {
+                        throw new RuntimeException("Incorrect integer format. " +
+                            "input: " + input + ", expected: " +
+                            ifExpected + ", result: " + result);
+                    }
+                });
+            System.out.println("bug8232860Test succeeded.");
+        } else {
+            System.out.println("Test ignored. Either :-\n" +
+                "OS is neither macOS/Windows, or\n" +
+                "provider is not HOST: " + type);
+        }
+    }
 }
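
The distinction bug8232860Test relies on: NumberFormat.getNumberInstance preserves the fractional part, while getIntegerInstance rounds to the nearest integer (half-even), which is why 123.4 is expected to format as "123.4" and "123" respectively. A standalone sketch against the default provider, independent of the HOST adapter path exercised above (the demo class is hypothetical):

    import java.text.NumberFormat;
    import java.util.Locale;

    public class IntegerFormatDemo {
        public static void main(String[] args) {
            NumberFormat numf = NumberFormat.getNumberInstance(Locale.US);
            NumberFormat intf = NumberFormat.getIntegerInstance(Locale.US);
            System.out.println(numf.format(123.4)); // 123.4
            System.out.println(intf.format(123.4)); // 123 (fraction rounded half-even)
        }
    }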
--- a/test/jdk/java/util/Locale/LocaleProvidersRun.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/util/Locale/LocaleProvidersRun.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * @bug 6336885 7196799 7197573 7198834 8000245 8000615 8001440 8008577
  *      8010666 8013086 8013233 8013903 8015960 8028771 8054482 8062006
- *      8150432 8215913 8220227 8228465 8232871
+ *      8150432 8215913 8220227 8228465 8232871 8232860
  * @summary tests for "java.locale.providers" system property
  * @library /test/lib
  * @build LocaleProviders
@@ -159,6 +159,9 @@
 
         //testing 8232871 fix. (macOS only)
         testRun("HOST", "bug8232871Test", "", "", "");
+
+        //testing 8232860 fix. (macOS/Windows only)
+        testRun("HOST", "bug8232860Test", "", "", "");
     }
 
     private static void testRun(String prefList, String methodName,
--- a/test/jdk/java/util/zip/ZipFile/ZipFileInputStreamSkipTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/java/util/zip/ZipFile/ZipFileInputStreamSkipTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -105,7 +105,7 @@
      * @throws Exception If an error occurs during the test
      */
     @Test
-    private void testStoredSkip() throws Exception {
+    public void testStoredSkip() throws Exception {
 
         try (ZipFile zf = new ZipFile(STORED_ZIPFILE.toFile())) {
             var entries = zf.entries();
@@ -153,7 +153,7 @@
      * @throws Exception If an error occurs during the test
      */
     @Test
-    private void testStoredNegativeSkip() throws Exception {
+    public void testStoredNegativeSkip() throws Exception {
 
         try (ZipFile zf = new ZipFile(STORED_ZIPFILE.toFile())) {
             var entries = zf.entries();
@@ -198,7 +198,7 @@
      * @throws Exception If an error occurs during the test
      */
     @Test
-    private void testDeflatedSkip() throws Exception {
+    public void testDeflatedSkip() throws Exception {
         try (ZipFile zf = new ZipFile(DEFLATED_ZIPFILE.toFile())) {
             var toSkip = 5; // Bytes to Skip
             var entries = zf.entries();
@@ -225,7 +225,7 @@
      * @throws Exception If an unexpected error occurs during the test
      */
     @Test
-    private void testDeflatedIOException() throws Exception {
+    public void testDeflatedIOException() throws Exception {
         try (ZipFile zf = new ZipFile(DEFLATED_ZIPFILE.toFile())) {
             var entries = zf.entries();
             while (entries.hasMoreElements()) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/jdk/jdk/jfr/api/recorder/TestRecorderListenerWithDump.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,37 @@
+package jdk.jfr.api.recorder;
+
+import java.nio.file.Paths;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import jdk.jfr.FlightRecorder;
+import jdk.jfr.FlightRecorderListener;
+import jdk.jfr.Recording;
+/**
+ * @test TestRecorderListenerWithDump
+ *
+ * @key jfr
+ * @requires vm.hasJFR
+ * @run main/othervm jdk.jfr.api.recorder.TestRecorderListenerWithDump
+ */
+public class TestRecorderListenerWithDump {
+
+    public static void main(String... args) throws Exception {
+        AtomicBoolean nullRecording = new AtomicBoolean();
+        FlightRecorder.addListener(new FlightRecorderListener() {
+            public void recordingStateChanged(Recording r) {
+                if (r == null) {
+                    nullRecording.set(true);
+                } else {
+                    System.out.println("Recording " + r.getName() + " " + r.getState());
+                }
+            }
+        });
+        try (Recording r = new Recording()) {
+            r.start();
+            r.dump(Paths.get("dump.jfr"));
+        }
+        if (nullRecording.get()) {
+            throw new Exception("FlightRecorderListener returned null recording");
+        }
+    }
+}
--- a/test/jdk/jdk/jfr/event/gc/collection/GCEventAll.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/gc/collection/GCEventAll.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,7 @@
         // recording.stop and getBeanCollectionCount().
         doSystemGc();
         // Add an extra System.gc() to make sure we get at least one full garbage_collection batch at
-        // the end of the test. This extra System.gc() is only necessary when using "UseConcMarkSweepGC" and "+ExplicitGCInvokesConcurrent".
+        // the end of the test. This extra System.gc() is only necessary when using "+ExplicitGCInvokesConcurrent".
         doSystemGc();
 
         recording.stop();
@@ -170,7 +170,6 @@
 
         // For some GC configurations, the JFR recording may have stopped before we received the last gc event.
         try {
-            events = filterIncompleteGcBatch(events);
             gcBatches = GCHelper.GcBatch.createFromEvents(events);
             eventCounts = GCHelper.CollectionSummary.createFromEvents(gcBatches);
 
@@ -191,41 +190,6 @@
         }
     }
 
-    /**
-     * When using collector ConcurrentMarkSweep with -XX:+ExplicitGCInvokesConcurrent, the JFR recording may
-     * stop before we have received the last garbage_collection event.
-     *
-     * This function does 3 things:
-     * 1. Check if the last batch is incomplete.
-     * 2. If it is incomplete, then asserts that incomplete batches are allowed for this configuration.
-     * 3. If incomplete batches are allowed, then the incomplete batch is removed.
-     *
-     * @param events All events
-     * @return All events with any incomplete batch removed.
-     * @throws Throwable
-     */
-    private List<RecordedEvent> filterIncompleteGcBatch(List<RecordedEvent> events) throws Throwable {
-        List<RecordedEvent> returnEvents = new ArrayList<RecordedEvent>(events);
-        int lastGcId = getLastGcId(events);
-        List<RecordedEvent> lastBatchEvents = getEventsWithGcId(events, lastGcId);
-        String[] endEvents = {GCHelper.event_garbage_collection, GCHelper.event_old_garbage_collection, GCHelper.event_young_garbage_collection};
-        boolean isComplete = containsAnyPath(lastBatchEvents, endEvents);
-        if (!isComplete) {
-            // The last GC batch does not contain an end event. The batch is incomplete.
-            // This is only allowed if we are using old_collector="ConcurrentMarkSweep" and "-XX:+ExplicitGCInvokesConcurrent"
-            boolean isExplicitGCInvokesConcurrent = hasInputArgument("-XX:+ExplicitGCInvokesConcurrent");
-            boolean isConcurrentMarkSweep = GCHelper.gcConcurrentMarkSweep.equals(oldCollector);
-            String msg = String.format(
-                    "Incomplete batch only allowed for '%s' with -XX:+ExplicitGCInvokesConcurrent",
-                    GCHelper.gcConcurrentMarkSweep);
-            Asserts.assertTrue(isConcurrentMarkSweep && isExplicitGCInvokesConcurrent, msg);
-
-            // Incomplete batch is allowed with the current settings. Remove incomplete batch.
-            returnEvents.removeAll(lastBatchEvents);
-        }
-        return returnEvents;
-    }
-
     private boolean hasInputArgument(String arg) {
         return ManagementFactory.getRuntimeMXBean().getInputArguments().contains(arg);
     }
@@ -276,8 +240,7 @@
     }
 
     private void verifyCollectionCount(String collector, long eventCounts, long beanCounts) {
-        if (GCHelper.gcConcurrentMarkSweep.equals(collector) || GCHelper.gcG1Old.equals(oldCollector)) {
-            // ConcurrentMarkSweep mixes old and new collections. Not same values as in MXBean.
+        if (GCHelper.gcG1Old.equals(oldCollector)) {
             // MXBean does not report old collections for G1Old, so we have nothing to compare with.
             return;
         }
@@ -338,11 +301,8 @@
                         }
                     }
                 }
-                if (!GCHelper.gcConcurrentMarkSweep.equals(batch.getName())) {
-                    // We do not get heap_summary events for ConcurrentMarkSweep
-                    Asserts.assertEquals(1, countBeforeGc, "Unexpected number of heap_summary.before_gc");
-                    Asserts.assertEquals(1, countAfterGc, "Unexpected number of heap_summary.after_gc");
-                }
+                Asserts.assertEquals(1, countBeforeGc, "Unexpected number of heap_summary.before_gc");
+                Asserts.assertEquals(1, countAfterGc, "Unexpected number of heap_summary.after_gc");
             } catch (Throwable e) {
                 GCHelper.log("verifySingleGcBatch failed for gcEvent:");
                 GCHelper.log(batch.getLog());
--- a/test/jdk/jdk/jfr/event/gc/collection/TestG1ParallelPhases.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/gc/collection/TestG1ParallelPhases.java	Thu Nov 14 13:50:03 2019 +0000
@@ -176,6 +176,9 @@
     public static void provokeMixedGC(int g1HeapRegionSize) {
         final var arraySize = 20_000;
         var liveOldObjects = new ArrayList<byte[]>();
+
+        // Make sure the heap is in a known state.
+        getWhiteBox().fullGC();
         allocateOldObjects(liveOldObjects, g1HeapRegionSize, arraySize);
         waitTillCMCFinished(10);
         getWhiteBox().g1StartConcMarkCycle();
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSConcurrent.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @requires vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run driver jdk.jfr.event.gc.collection.TestGCCauseWithCMSConcurrent
- */
-public class TestGCCauseWithCMSConcurrent {
-    public static void main(String[] args) throws Exception {
-        String testID = "CMSConcurrent";
-        String[] vmFlags = {"-XX:+UseConcMarkSweepGC", "-XX:+ExplicitGCInvokesConcurrent"};
-        String[] gcNames = {GCHelper.gcConcurrentMarkSweep, GCHelper.gcParNew, GCHelper.gcSerialOld};
-        String[] gcCauses = {"CMS Concurrent Mark", "Allocation Failure", "System.gc()"};
-        GCGarbageCollectionUtil.test(testID, vmFlags, gcNames, gcCauses);
-    }
-}
-
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCCauseWithCMSMarkSweep.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @requires vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run driver jdk.jfr.event.gc.collection.TestGCCauseWithCMSMarkSweep
- */
-public class TestGCCauseWithCMSMarkSweep {
-    public static void main(String[] args) throws Exception {
-        String testID = "CMSMarkSweep";
-        String[] vmFlags = {"-XX:+UseConcMarkSweepGC"};
-        String[] gcNames = {GCHelper.gcConcurrentMarkSweep, GCHelper.gcParNew, GCHelper.gcSerialOld};
-        String[] gcCauses = {"CMS Concurrent Mark", "Allocation Failure", "System.gc()"};
-        GCGarbageCollectionUtil.test(testID, vmFlags, gcNames, gcCauses);
-    }
-}
-
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSConcurrent.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- *
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xmx32m -Xmn8m -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent jdk.jfr.event.gc.collection.TestGCEventMixedWithCMSConcurrent
- * good debug flags: -Xlog:gc+heap=trace,gc*=debug
- */
-public class TestGCEventMixedWithCMSConcurrent {
-    public static void main(String[] args) throws Throwable {
-        GCEventAll.doTest();
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithCMSMarkSweep.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- *
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xmx32m -Xmn8m -XX:+UseConcMarkSweepGC -XX:-ExplicitGCInvokesConcurrent jdk.jfr.event.gc.collection.TestGCEventMixedWithCMSMarkSweep
- * good debug flags: -Xlog:gc+heap=trace,gc*=debug
- */
-public class TestGCEventMixedWithCMSMarkSweep {
-    public static void main(String[] args) throws Throwable {
-        GCEventAll.doTest();
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/collection/TestGCEventMixedWithParNew.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -Xmx32m -Xmn8m -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC jdk.jfr.event.gc.collection.TestGCEventMixedWithParNew
- * good debug flags: -Xlog:gc*=debug
- */
-
-public class TestGCEventMixedWithParNew {
-    public static void main(String[] args) throws Throwable {
-        GCEventAll.doTest();
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/collection/TestYoungGarbageCollectionEventWithParNew.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.collection;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -Xmx50m -Xmn2m -XX:+UseConcMarkSweepGC -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc+heap=trace,gc*=debug jdk.jfr.event.gc.collection.TestYoungGarbageCollectionEventWithParNew
- */
-public class TestYoungGarbageCollectionEventWithParNew {
-
-    public static void main(String[] args) throws Exception {
-        YoungGarbageCollectionEvent.test();
-    }
-
-}
--- a/test/jdk/jdk/jfr/event/gc/detailed/PromotionEvent.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/gc/detailed/PromotionEvent.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,8 +69,7 @@
         List<GarbageCollectorMXBean> gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
         for (GarbageCollectorMXBean gcBean : gcBeans) {
             if ("PS Scavenge".equals(gcBean.getName())
-                    || "G1 Young Generation".equals(gcBean.getName())
-                    || ("ParNew".equals(gcBean.getName()))) {
+                    || "G1 Young Generation".equals(gcBean.getName())) {
                 ycBean = gcBean;
             }
 
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestCMSConcurrentModeFailureEvent.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,88 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.detailed;
-
-import java.io.IOException;
-import java.io.File;
-import java.nio.charset.Charset;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Optional;
-
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.jfr.consumer.RecordingFile;
-import jdk.test.lib.Asserts;
-import jdk.test.lib.jfr.EventNames;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- *
- * @run main jdk.jfr.event.gc.detailed.TestCMSConcurrentModeFailureEvent
- */
-public class TestCMSConcurrentModeFailureEvent {
-
-    private final static String EVENT_NAME = EventNames.ConcurrentModeFailure;
-    private final static String EVENT_SETTINGS_FILE = System.getProperty("test.src", ".") + File.separator + "concurrentmodefailure-testsettings.jfc";
-    private final static String JFR_FILE = "TestCMSConcurrentModeFailureEvent.jfr";
-    private final static int BYTES_TO_ALLOCATE = 1024 * 512;
-
-    public static void main(String[] args) throws Exception {
-        String[] vmFlags = {"-Xmx128m", "-XX:MaxTenuringThreshold=0", "-Xlog:gc*=debug:testCMSGC.log",
-            "-XX:+UseConcMarkSweepGC", "-XX:+UnlockExperimentalVMOptions", "-XX:-UseFastUnorderedTimeStamps"};
-
-        if (!ExecuteOOMApp.execute(EVENT_SETTINGS_FILE, JFR_FILE, vmFlags, BYTES_TO_ALLOCATE)) {
-            System.out.println("OOM happened in the other thread(not test thread). Skip test.");
-            // Skip test, process terminates due to the OOME error in the different thread
-            return;
-        }
-
-        Optional<RecordedEvent> event = RecordingFile.readAllEvents(Paths.get(JFR_FILE)).stream().findFirst();
-        if (event.isPresent()) {
-            Asserts.assertEquals(EVENT_NAME, event.get().getEventType().getName(), "Wrong event type");
-        } else {
-            // No event received. Check if test did trigger the event.
-            boolean isEventTriggered = fileContainsString("testCMSGC.log", "concurrent mode failure");
-            System.out.println("isEventTriggered=" +isEventTriggered);
-            Asserts.assertFalse(isEventTriggered, "Event found in log, but not in JFR");
-        }
-    }
-
-    private static boolean fileContainsString(String filename, String text) throws IOException {
-        Path p = Paths.get(filename);
-        for (String line : Files.readAllLines(p, Charset.defaultCharset())) {
-            if (line.contains(text)) {
-                return true;
-            }
-        }
-        return false;
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/detailed/TestPromotionFailedEventWithParNew.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.detailed;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm  jdk.jfr.event.gc.detailed.TestPromotionFailedEventWithParNew
- */
-public class TestPromotionFailedEventWithParNew {
-
-    public static void main(String[] args) throws Throwable {
-        PromotionFailedEvent.test("TestPromotionFailedEventWithParNew",
-                new String[]{"-Xmx32m", "-Xmn30m", "-XX:-UseDynamicNumberOfGCThreads",
-                    "-XX:ParallelGCThreads=3", "-XX:MaxTenuringThreshold=0",
-                    "-Xlog:gc*=debug", "-XX:+UseConcMarkSweepGC",
-                    "-XX:+UnlockExperimentalVMOptions", "-XX:-UseFastUnorderedTimeStamps"});
-    }
-
-}
--- a/test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventConcurrentCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.heapsummary;
-
-import java.time.Duration;
-import java.util.List;
-
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.test.lib.Asserts;
-import jdk.test.lib.jfr.EventNames;
-import jdk.test.lib.jfr.Events;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent jdk.jfr.event.gc.heapsummary.TestHeapSummaryEventConcurrentCMS
- */
-public class TestHeapSummaryEventConcurrentCMS {
-
-    public static void main(String[] args) throws Exception {
-        Recording recording = new Recording();
-        recording.enable(EventNames.GarbageCollection).withThreshold(Duration.ofMillis(0));
-        recording.enable(EventNames.GCHeapSummary).withThreshold(Duration.ofMillis(0));
-
-        recording.start();
-        // Need several GCs to ensure at least one heap summary event from concurrent CMS
-        GCHelper.callSystemGc(6, true);
-        recording.stop();
-
-        // Remove first and last GCs which can be incomplete
-        List<RecordedEvent> events = GCHelper.removeFirstAndLastGC(Events.fromRecording(recording));
-        Asserts.assertFalse(events.isEmpty(), "No events found");
-        for (RecordedEvent event : events) {
-            System.out.println("Event: " + event);
-            if (!isCmsGcEvent(event)) {
-                continue;
-            }
-            int gcId = Events.assertField(event, "gcId").getValue();
-            verifyHeapSummary(events, gcId, "Before GC");
-            verifyHeapSummary(events, gcId, "After GC");
-        }
-    }
-
-    private static boolean isCmsGcEvent(RecordedEvent event) {
-        if (!Events.isEventType(event, EventNames.GarbageCollection)) {
-            return false;
-        }
-        final String gcName = Events.assertField(event, "name").notEmpty().getValue();
-        return "ConcurrentMarkSweep".equals(gcName);
-    }
-
-    private static void verifyHeapSummary(List<RecordedEvent> events, int gcId, String when) {
-        for (RecordedEvent event : events) {
-            if (!Events.isEventType(event, EventNames.GCHeapSummary)) {
-                continue;
-            }
-            if (gcId == (int)Events.assertField(event, "gcId").getValue() &&
-                    when.equals(Events.assertField(event, "when").getValue())) {
-                System.out.printf("Found " + EventNames.GCHeapSummary + " for id=%d, when=%s%n", gcId, when);
-                return;
-            }
-        }
-        Asserts.fail(String.format("No " + EventNames.GCHeapSummary + " for id=%d, when=%s", gcId, when));
-    }
-
-}
--- a/test/jdk/jdk/jfr/event/gc/heapsummary/TestHeapSummaryEventParNewCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.heapsummary;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC jdk.jfr.event.gc.heapsummary.TestHeapSummaryEventParNewCMS
- */
-public class TestHeapSummaryEventParNewCMS {
-    public static void main(String[] args) throws Exception {
-        HeapSummaryEventAllGcs.test(GCHelper.gcParNew, GCHelper.gcConcurrentMarkSweep);
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSConcurrent.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.objectcount;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithCMSConcurrent
- */
-public class TestObjectCountAfterGCEventWithCMSConcurrent {
-    public static void main(String[] args) throws Exception {
-        ObjectCountAfterGCEvent.test(GCHelper.gcConcurrentMarkSweep);
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/objectcount/TestObjectCountAfterGCEventWithCMSMarkSweep.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.objectcount;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseConcMarkSweepGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithCMSMarkSweep
- */
-public class TestObjectCountAfterGCEventWithCMSMarkSweep {
-    public static void main(String[] args) throws Exception {
-        ObjectCountAfterGCEvent.test(GCHelper.gcSerialOld);
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSConcurrent.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.refstat;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != false
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc+heap=trace,gc*=debug -XX:+UseConcMarkSweepGC -XX:+ExplicitGCInvokesConcurrent jdk.jfr.event.gc.refstat.TestRefStatEventWithCMSConcurrent
- */
-public class TestRefStatEventWithCMSConcurrent {
-    public static void main(String[] args) throws Exception {
-        RefStatEvent.test(GCHelper.gcConcurrentMarkSweep);
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/refstat/TestRefStatEventWithCMSMarkSweep.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.gc.refstat;
-import jdk.test.lib.jfr.GCHelper;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires (vm.gc == "ConcMarkSweep" | vm.gc == null) & !vm.graal.enabled
- *           & vm.opt.ExplicitGCInvokesConcurrent != true
- * @library /test/lib /test/jdk
- * @run main/othervm  -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -Xlog:gc+heap=trace,gc*=debug -XX:+UseConcMarkSweepGC jdk.jfr.event.gc.refstat.TestRefStatEventWithCMSMarkSweep
- */
-public class TestRefStatEventWithCMSMarkSweep {
-    public static void main(String[] args) throws Exception {
-        RefStatEvent.test(GCHelper.gcSerialOld);
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/AllocationStackTrace.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/gc/stacktrace/AllocationStackTrace.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -210,52 +210,6 @@
     }
 
     /**
-     * Tests event stacktrace for young GC if -XX:+UseConcMarkSweepGC is used
-     */
-    public static void testParNewAllocEvent() throws Exception {
-        GarbageCollectorMXBean bean = garbageCollectorMXBean("ParNew");
-        MemoryAllocator memory = new EdenMemoryAllocator();
-
-        String[] expectedStack = new String[]{
-            "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testAllocEvent",
-            "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testParNewAllocEvent"
-        };
-
-        testAllocEvent(bean, memory, expectedStack);
-    }
-
-    /**
-     * Tests event stacktrace for old GC if -XX:+UseConcMarkSweepGC is used
-     */
-    public static void testConcMarkSweepAllocEvent() throws Exception {
-        GarbageCollectorMXBean bean = garbageCollectorMXBean("ConcurrentMarkSweep");
-        MemoryAllocator memory = new OldGenMemoryAllocator();
-
-        String[] expectedStack = new String[]{
-            "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testAllocEvent",
-            "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testConcMarkSweepAllocEvent"
-        };
-
-        testAllocEvent(bean, memory, expectedStack);
-    }
-
-    /**
-     * Tests event stacktrace during metaspace GC threshold if
-     * -XX:+UseConcMarkSweepGC is used
-     */
-    public static void testMetaspaceConcMarkSweepGCAllocEvent() throws Exception {
-        GarbageCollectorMXBean bean = garbageCollectorMXBean("ConcurrentMarkSweep");
-        MemoryAllocator memory = new MetaspaceMemoryAllocator();
-
-        String[] expectedStack = new String[]{
-            "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testAllocEvent",
-            "jdk.jfr.event.gc.stacktrace.AllocationStackTrace.testMetaspaceConcMarkSweepGCAllocEvent"
-        };
-
-        testAllocEvent(bean, memory, expectedStack);
-    }
-
-    /**
      * Tests event stacktrace for young GC if -XX:+UseParallelGC is used
      */
     public static void testParallelScavengeAllocEvent() throws Exception {
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/TestConcMarkSweepAllocationPendingStackTrace.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.stacktrace;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:MaxNewSize=10M -Xmx64M -XX:+UseConcMarkSweepGC -Xlog:gc* jdk.jfr.event.gc.stacktrace.TestConcMarkSweepAllocationPendingStackTrace
- */
-public class TestConcMarkSweepAllocationPendingStackTrace {
-
-    public static void main(String[] args) throws Exception {
-        AllocationStackTrace.testConcMarkSweepAllocEvent();
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.stacktrace;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @requires !(vm.compMode == "Xcomp" & os.arch == "aarch64")
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -XX:MaxMetaspaceSize=64M -Xlog:gc* jdk.jfr.event.gc.stacktrace.TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace
- */
-public class TestMetaspaceConcMarkSweepGCAllocationPendingStackTrace {
-
-    public static void main(String[] args) throws Exception {
-        AllocationStackTrace.testMetaspaceConcMarkSweepGCAllocEvent();
-    }
-}
--- a/test/jdk/jdk/jfr/event/gc/stacktrace/TestParNewAllocationPendingStackTrace.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package jdk.jfr.event.gc.stacktrace;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- *
- * @requires (vm.gc == "null" | vm.gc == "ConcMarkSweep") & !vm.graal.enabled
- * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UseConcMarkSweepGC -Xlog:gc* -XX:+FlightRecorder jdk.jfr.event.gc.stacktrace.TestParNewAllocationPendingStackTrace
- */
-public class TestParNewAllocationPendingStackTrace {
-
-    public static void main(String[] args) throws Exception {
-        AllocationStackTrace.testParNewAllocEvent();
-    }
-}
--- a/test/jdk/jdk/jfr/event/oldobject/TestCMS.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.  Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-package jdk.jfr.event.oldobject;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import jdk.jfr.Recording;
-import jdk.jfr.consumer.RecordedEvent;
-import jdk.jfr.internal.test.WhiteBox;
-import jdk.test.lib.jfr.EventNames;
-import jdk.test.lib.jfr.Events;
-
-/**
- * @test
- * @key jfr
- * @requires vm.hasJFR
- * @requires vm.gc == "null" & !vm.graal.enabled
- * @summary Test leak profiler with CMS GC
- * @library /test/lib /test/jdk
- * @modules jdk.jfr/jdk.jfr.internal.test
- * @run main/othervm  -XX:TLABSize=2k -XX:+UseConcMarkSweepGC jdk.jfr.event.oldobject.TestCMS
- */
-public class TestCMS {
-
-    static private class FindMe {
-    }
-
-    public static List<FindMe[]> list = new ArrayList<>(OldObjects.MIN_SIZE);
-
-    public static void main(String[] args) throws Exception {
-        WhiteBox.setWriteAllObjectSamples(true);
-
-        try (Recording r = new Recording()) {
-            r.enable(EventNames.OldObjectSample).withStackTrace().with("cutoff", "infinity");
-            r.start();
-            allocateFindMe();
-            System.gc();
-            r.stop();
-            List<RecordedEvent> events = Events.fromRecording(r);
-            System.out.println(events);
-            if (OldObjects.countMatchingEvents(events, FindMe[].class, null, null, -1, "allocateFindMe") == 0) {
-                throw new Exception("Could not find leak with " + FindMe[].class);
-            }
-        }
-    }
-
-    public static void allocateFindMe() {
-        for (int i = 0; i < OldObjects.MIN_SIZE; i++) {
-            // Allocate array to trigger sampling code path for interpreter / c1
-            list.add(new FindMe[0]);
-        }
-    }
-
-}
--- a/test/jdk/jdk/jfr/event/oldobject/TestMetadataRetention.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/oldobject/TestMetadataRetention.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -88,9 +88,8 @@
                 allocatorThread = null;
 
                 // System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent
-                // is NOT set. If this flag is set G1 will never unload classes on System.gc()
-                // and CMS will not guarantee that all semantically dead classes will be
-                // unloaded. As far as the "jfr" key guarantees no VM flags are set from the
+                // is NOT set. If this flag is set G1 will never unload classes on System.gc().
+                // As long as the "jfr" key guarantees no VM flags are set from the
                 // outside it should be enough with System.gc().
                 System.gc();
 
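
The hunk above deletes the CMS caveat; what remains is the rule the test relies on: an explicit System.gc() can unload classes only when -XX:+ExplicitGCInvokesConcurrent is not set. A minimal, self-contained sketch of that unloading pattern, assuming a hypothetical class p.Helper compiled into ./classes (neither is part of this changeset):

    import java.net.URL;
    import java.net.URLClassLoader;
    import java.nio.file.Path;

    // Sketch only: p.Helper and the ./classes directory are hypothetical.
    class UnloadSketch {
        public static void main(String[] args) throws Exception {
            URLClassLoader loader = new URLClassLoader(
                    new URL[] { Path.of("classes").toUri().toURL() });
            Class<?> c = Class.forName("p.Helper", true, loader);
            System.out.println("loaded " + c + " from " + c.getClassLoader());

            // Drop every reference so the loader and its classes can become unreachable.
            c = null;
            loader.close();
            loader = null;

            // Without -XX:+ExplicitGCInvokesConcurrent this full collection may
            // unload p.Helper; with the flag set, G1 starts a concurrent cycle
            // that never unloads classes on System.gc() (see the comment above).
            System.gc();
        }
    }
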
--- a/test/jdk/jdk/jfr/event/runtime/TestBiasedLockRevocationEvents.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/runtime/TestBiasedLockRevocationEvents.java	Thu Nov 14 13:50:03 2019 +0000
@@ -101,8 +101,9 @@
     }
 
     // Retrieve all biased lock revocation events related to the provided lock class, sorted by start time
-    static List<RecordedEvent> getRevocationEvents(Recording recording, String fieldName, Class<?> lockClass) throws Throwable {
+    static List<RecordedEvent> getRevocationEvents(Recording recording, String eventTypeName, String fieldName, Class<?> lockClass) throws Throwable {
         return Events.fromRecording(recording).stream()
+                .filter(e -> e.getEventType().getName().equals(eventTypeName))
                 .filter(e -> ((RecordedClass)e.getValue(fieldName)).getName().equals(lockClass.getName()))
                 .sorted(Comparator.comparing(RecordedEvent::getStartTime))
                 .collect(Collectors.toList());
@@ -119,7 +120,7 @@
         Thread biasBreaker = triggerRevocation(1, MyLock.class);
 
         recording.stop();
-        List<RecordedEvent> events = getRevocationEvents(recording, "lockClass", MyLock.class);
+        List<RecordedEvent> events = getRevocationEvents(recording, EventNames.BiasedLockRevocation, "lockClass", MyLock.class);
         Asserts.assertEQ(events.size(), 1);
 
         RecordedEvent event = events.get(0);
@@ -143,7 +144,7 @@
         Thread biasBreaker = triggerRevocation(BULK_REVOKE_THRESHOLD, MyLock.class);
 
         recording.stop();
-        List<RecordedEvent> events = getRevocationEvents(recording, "revokedClass", MyLock.class);
+        List<RecordedEvent> events = getRevocationEvents(recording, EventNames.BiasedLockClassRevocation, "revokedClass", MyLock.class);
         Asserts.assertEQ(events.size(), 1);
 
         RecordedEvent event = events.get(0);
@@ -169,7 +170,7 @@
         Thread.holdsLock(l);
 
         recording.stop();
-        List<RecordedEvent> events = getRevocationEvents(recording, "lockClass", MyLock.class);
+        List<RecordedEvent> events = getRevocationEvents(recording, EventNames.BiasedLockSelfRevocation, "lockClass", MyLock.class);
         Asserts.assertEQ(events.size(), 1);
 
         RecordedEvent event = events.get(0);
@@ -211,7 +212,7 @@
         touch(l);
 
         recording.stop();
-        List<RecordedEvent> events = getRevocationEvents(recording, "lockClass", MyLock.class);
+        List<RecordedEvent> events = getRevocationEvents(recording, EventNames.BiasedLockRevocation, "lockClass", MyLock.class);
         Asserts.assertEQ(events.size(), 1);
 
         RecordedEvent event = events.get(0);
@@ -237,7 +238,7 @@
         Thread biasBreaker1 = triggerRevocation(BULK_REVOKE_THRESHOLD, MyLock.class);
 
         recording.stop();
-        List<RecordedEvent> events = getRevocationEvents(recording, "revokedClass", MyLock.class);
+        List<RecordedEvent> events = getRevocationEvents(recording, EventNames.BiasedLockClassRevocation, "revokedClass", MyLock.class);
         Asserts.assertEQ(events.size(), 2);
 
         // The rebias event should occur before the noRebias one
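
getRevocationEvents now takes the expected event-type name and filters on it before matching the class-valued field, so a recording that contains several revocation flavors (BiasedLockRevocation, BiasedLockSelfRevocation, BiasedLockClassRevocation) cannot leak events of the wrong type into an assertion. A sketch of the same filter-then-sort pipeline, mirroring the helper above (it assumes the jtreg test library's jdk.test.lib.jfr.Events on the class path):

    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    import jdk.jfr.Recording;
    import jdk.jfr.consumer.RecordedClass;
    import jdk.jfr.consumer.RecordedEvent;
    import jdk.test.lib.jfr.Events;

    class RevocationFilterSketch {
        // Same shape as getRevocationEvents above: keep one event type, then
        // match the class carried in a named field, then order by start time.
        static List<RecordedEvent> eventsFor(Recording recording, String eventTypeName,
                                             String fieldName, Class<?> lockClass) throws Throwable {
            return Events.fromRecording(recording).stream()
                    .filter(e -> e.getEventType().getName().equals(eventTypeName))
                    .filter(e -> ((RecordedClass) e.getValue(fieldName)).getName()
                                     .equals(lockClass.getName()))
                    .sorted(Comparator.comparing(RecordedEvent::getStartTime))
                    .collect(Collectors.toList());
        }
    }
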
--- a/test/jdk/jdk/jfr/event/runtime/TestClassLoadingStatisticsEvent.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/runtime/TestClassLoadingStatisticsEvent.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,9 +47,8 @@
  * the loadedClassCount and unloadedClassCount attributes are correct.
  *
  * System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent
- * is NOT set. If this flag is set G1 will never unload classes on System.gc()
- * and CMS will not guarantee that all semantically dead classes will be
- * unloaded. As far as the "jfr" key guarantees no VM flags are set from the
+ * is NOT set. If this flag is set G1 will never unload classes on System.gc().
+ * As long as the "jfr" key guarantees no VM flags are set from the
  * outside it should be enough with System.gc().
  */
 public class TestClassLoadingStatisticsEvent {
--- a/test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/runtime/TestClassUnloadEvent.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -49,8 +49,7 @@
 
 /**
  * System.gc() will trigger class unloading if -XX:+ExplicitGCInvokesConcurrent is NOT set.
- * If this flag is set G1 will never unload classes on System.gc() and
- * CMS will not guarantee that all semantically dead classes will be unloaded.
+ * If this flag is set G1 will never unload classes on System.gc().
  * As far as the "jfr" key guarantees no VM flags are set from the outside
  * it should be enough with System.gc().
  */
--- a/test/jdk/jdk/jfr/event/runtime/TestThreadCpuTimeEvent.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/runtime/TestThreadCpuTimeEvent.java	Thu Nov 14 13:50:03 2019 +0000
@@ -107,7 +107,7 @@
             } catch (BrokenBarrierException e) {
                 // Another thread has been interrupted - wait for us to be interrupted as well
                 while (!interrupted()) {
-                    yield();
+                    Thread.yield();
                 }
             } catch (InterruptedException e) {
                 // Normal way of stopping the thread
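
The one-line fix qualifies the call as Thread.yield(): since yield became a restricted identifier (it supplies the value of a switch-expression arm), javac rejects unqualified method invocations named yield, and this test runs under a current compiler. An illustrative fragment (class and method names are made up):

    class YieldSpin {
        static void spinUntilInterrupted() {
            while (!Thread.interrupted()) {
                // yield();     // no longer parses: 'yield' is a restricted identifier
                Thread.yield(); // a qualified call is always legal
            }
        }
    }
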
--- a/test/jdk/jdk/jfr/event/runtime/TestVmFlagChangedEvent.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/jdk/jfr/event/runtime/TestVmFlagChangedEvent.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,6 @@
 
     public static void main(String[] args) throws Throwable {
         EventFlag[] eventFlags = {
-            new EventFlag(EventNames.LongFlagChanged, "CMSWaitDuration", "2500"),
             new EventFlag(EventNames.StringFlagChanged, "HeapDumpPath", "/a/sample/path"),
             new EventFlag(EventNames.BooleanFlagChanged, "HeapDumpOnOutOfMemoryError", "true")
         };
--- a/test/jdk/tools/launcher/ArgsFileTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/tools/launcher/ArgsFileTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -23,7 +23,7 @@
 
 /**
  * @test
- * @bug 8027634
+ * @bug 8027634 8231863
  * @summary Argument parsing from file
  * @modules jdk.compiler
  *          jdk.zipfs
@@ -61,13 +61,17 @@
         env.put(JLDEBUG_KEY, "true");
     }
 
-    private File createArgFile(String fname, List<String> lines) throws IOException {
+    private File createArgFile(String fname, List<String> lines, boolean endWithNewline) throws IOException {
         File argFile = new File(fname);
         argFile.delete();
-        createAFile(argFile, lines);
+        createAFile(argFile, lines, endWithNewline);
         return argFile;
     }
 
+    private File createArgFile(String fname, List<String> lines) throws IOException {
+        return createArgFile(fname, lines, true);
+    }
+
     private void verifyOptions(List<String> args, TestResult tr) {
         if (args.isEmpty()) {
             return;
@@ -266,6 +270,23 @@
         userArgs.delete();
     }
 
+    @Test
+    public void userApplicationWithoutEmptyLastLine() throws IOException {
+        File cpOpt = createArgFile("cpOpt", Arrays.asList("-classpath ."), false);
+        File vmArgs = createArgFile("vmArgs", Arrays.asList("-Xint"), false);
+
+        TestResult tr = doExec(env, javaCmd, "-cp", "test.jar", "@cpOpt", "Foo", "-test");
+        verifyOptions(Arrays.asList("-cp", "test.jar", "-classpath", ".", "Foo", "-test"), tr);
+        verifyUserArgs(Arrays.asList("-test"), tr, 6);
+
+        tr = doExec(env, javaCmd, "-cp", "test.jar", "@vmArgs", "Foo", "-test");
+        verifyOptions(Arrays.asList("-cp", "test.jar", "-Xint", "Foo", "-test"), tr);
+        verifyUserArgs(Arrays.asList("-test"), tr, 5);
+
+        cpOpt.delete();
+        vmArgs.delete();
+    }
+
     // test with missing file
     @Test
     public void missingFileNegativeTest() throws IOException {
--- a/test/jdk/tools/launcher/TestHelper.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/jdk/tools/launcher/TestHelper.java	Thu Nov 14 13:50:03 2019 +0000
@@ -349,12 +349,23 @@
      * occurs then back off for a moment and try again. When a number of
      * attempts fail, give up and throw an exception.
      */
-    void createAFile(File aFile, List<String> contents) throws IOException {
+    void createAFile(File aFile, List<String> lines) throws IOException {
+        createAFile(aFile, lines, true);
+    }
+
+    void createAFile(File aFile, List<String> lines, boolean endWithNewline) throws IOException {
         IOException cause = null;
         for (int attempts = 0; attempts < 10; attempts++) {
             try {
-                Files.write(aFile.getAbsoluteFile().toPath(), contents,
-                    Charset.defaultCharset(), CREATE, TRUNCATE_EXISTING, WRITE);
+                if (endWithNewline) {
+                    Files.write(aFile.getAbsoluteFile().toPath(),
+                        lines, Charset.defaultCharset(),
+                        CREATE, TRUNCATE_EXISTING, WRITE);
+                } else {
+                    Files.write(aFile.getAbsoluteFile().toPath(),
+                        String.join(System.lineSeparator(), lines).getBytes(Charset.defaultCharset()),
+                        CREATE, TRUNCATE_EXISTING, WRITE);
+                }
                 if (cause != null) {
                     /*
                      * report attempts and errors that were encountered
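
createAFile gains an endWithNewline switch because the Iterable overload of Files.write terminates every line, including the last; producing an argument file without a trailing newline (the 8231863 scenario ArgsFileTest now covers) requires joining the lines and writing raw bytes. A minimal sketch of the two paths, with arbitrary file names:

    import java.io.IOException;
    import java.nio.charset.Charset;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;

    import static java.nio.file.StandardOpenOption.*;

    class NewlineSketch {
        public static void main(String[] args) throws IOException {
            List<String> lines = List.of("-classpath .");
            Path withNl = Path.of("with-newline.txt");
            Path withoutNl = Path.of("without-newline.txt");

            // Files.write(Path, Iterable, ...) appends the line separator after
            // every element, including the last one.
            Files.write(withNl, lines, Charset.defaultCharset(),
                        CREATE, TRUNCATE_EXISTING, WRITE);

            // Joining first and writing raw bytes leaves the final line
            // unterminated, which is what the argument-file test exercises.
            Files.write(withoutNl,
                        String.join(System.lineSeparator(), lines)
                              .getBytes(Charset.defaultCharset()),
                        CREATE, TRUNCATE_EXISTING, WRITE);

            System.out.println(Files.size(withNl) + " vs " + Files.size(withoutNl));
        }
    }
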
--- a/test/langtools/tools/javac/ConditionalWithVoid.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/ConditionalWithVoid.java	Thu Nov 14 13:50:03 2019 +0000
@@ -4,7 +4,7 @@
  * @summary The compiler was allowing void types in its parsing of conditional expressions.
  * @author tball
  *
- * @compile/fail/ref=ConditionalWithVoid.out --enable-preview -source ${jdk.version} -XDrawDiagnostics ConditionalWithVoid.java
+ * @compile/fail/ref=ConditionalWithVoid.out -XDrawDiagnostics ConditionalWithVoid.java
  */
 public class ConditionalWithVoid {
     public void test(Object o, String s) {
--- a/test/langtools/tools/javac/ConditionalWithVoid.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/ConditionalWithVoid.out	Thu Nov 14 13:50:03 2019 +0000
@@ -2,6 +2,4 @@
 ConditionalWithVoid.java:14:53: compiler.err.void.not.allowed.here
 ConditionalWithVoid.java:16:82: compiler.err.void.not.allowed.here
 ConditionalWithVoid.java:18:64: compiler.err.void.not.allowed.here
-- compiler.note.preview.filename: ConditionalWithVoid.java
-- compiler.note.preview.recompile
 4 errors
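
The ConditionalWithVoid update above and the long run of langtools changes below all drop --enable-preview -source ${jdk.version} and the two compiler.note.preview.* lines from golden files: switch expressions and yield now compile as standard features. For reference, a small switch expression of the kind these tests now accept with no extra flags:

    class SwitchExprDemo {
        static String describe(int i) {
            // Arrow cases and yield compile without --enable-preview once the
            // feature is standard, which is what the updated tests assert.
            return switch (i) {
                case 0 -> "zero";
                case 1, 2 -> "small";
                default -> {
                    String s = "big(" + i + ")";
                    yield s; // yield supplies the value of a block-bodied arm
                }
            };
        }

        public static void main(String[] args) {
            System.out.println(describe(0) + " " + describe(2) + " " + describe(9));
        }
    }
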
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/langtools/tools/javac/api/TestModuleUnnamedPackage.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug     8234025
+ * @summary Elements.getPackageElement(ModuleElement,CharSequence) returns null for unnamed package
+ * @modules jdk.compiler
+ * @library /tools/lib /tools/javac/lib
+ * @build toolbox.ModuleBuilder toolbox.ToolBox
+ * @run main TestModuleUnnamedPackage
+ */
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Set;
+
+import javax.annotation.processing.RoundEnvironment;
+import javax.lang.model.element.ModuleElement;
+import javax.lang.model.element.PackageElement;
+import javax.lang.model.element.TypeElement;
+import javax.lang.model.util.Elements;
+import javax.tools.JavaCompiler;
+import javax.tools.JavaFileObject;
+import javax.tools.StandardJavaFileManager;
+import javax.tools.StandardLocation;
+import javax.tools.ToolProvider;
+
+import com.sun.source.util.JavacTask;
+import toolbox.Assert;
+import toolbox.ToolBox;
+
+public class TestModuleUnnamedPackage extends JavacTestingAbstractProcessor {
+    public static void main(String... args) throws IOException {
+        TestModuleUnnamedPackage t = new TestModuleUnnamedPackage();
+        t.run();
+    }
+
+    void run() throws IOException {
+        ToolBox tb = new ToolBox();
+        Path src = Path.of("src");
+
+        tb.writeJavaFiles(src, "module m { exports p; }",
+                "package p; public class C { }");
+
+        JavaCompiler c = ToolProvider.getSystemJavaCompiler();
+        StandardJavaFileManager fm = c.getStandardFileManager(null, null, null);
+        fm.setLocationFromPaths(StandardLocation.SOURCE_PATH, List.of(src));
+        List<String> options = List.of("-proc:only");
+        Iterable<? extends JavaFileObject> files = fm.getJavaFileObjects(tb.findJavaFiles(src));
+        JavacTask t = (JavacTask) c.getTask(null, fm, null, options, null, files);
+        t.setProcessors(List.of(this));
+        t.call();
+    }
+
+    public boolean process(Set<? extends TypeElement> elems, RoundEnvironment rEnv) {
+        if (rEnv.processingOver()) {
+            Elements elements = processingEnv.getElementUtils();
+            ModuleElement m = elements.getModuleElement("m");
+            PackageElement unnamed = elements.getPackageElement(m, "");
+            System.out.println("module m: " + m);
+            System.out.println("module m, unnamed package: " + unnamed);
+            Assert.checkNonNull(unnamed, "unnamed package in module m");
+        }
+        return true;
+    }
+}
+
--- a/test/langtools/tools/javac/diags/examples.not-yet.txt	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples.not-yet.txt	Thu Nov 14 13:50:03 2019 +0000
@@ -116,6 +116,7 @@
 compiler.warn.override.bridge
 compiler.warn.position.overflow                         # CRTable: caused by files with long lines >= 1024 chars
 compiler.warn.proc.type.already.exists                  # JavacFiler: just mentioned in TODO
+compiler.warn.restricted.type.not.allowed.preview       # not produced by the compiler right now
 compiler.warn.unchecked.assign                          # DEAD, replaced by compiler.misc.unchecked.assign
 compiler.warn.unchecked.cast.to.type                    # DEAD, replaced by compiler.misc.unchecked.cast.to.type
 compiler.warn.unexpected.archive.file                   # Paths: zip file with unknown extn
--- a/test/langtools/tools/javac/diags/examples/BreakOutsideSwitchExpression.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/BreakOutsideSwitchExpression.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.break.outside.switch.expression
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class BreakOutsideSwitchExpression {
     int t(int i) {
--- a/test/langtools/tools/javac/diags/examples/ContinueOutsideSwitchExpression.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/ContinueOutsideSwitchExpression.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.continue.outside.switch.expression
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class ContinueOutsideSwitchExpression {
     int t(int i) {
--- a/test/langtools/tools/javac/diags/examples/IllegalRefToRestrictedType.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/IllegalRefToRestrictedType.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,7 +22,8 @@
  */
 
 // key: compiler.warn.illegal.ref.to.restricted.type
-// key: compiler.warn.restricted.type.not.allowed.preview
+// key: compiler.warn.restricted.type.not.allowed
+// options: -Xlint:-options -source 13
 
 class IllegalRefToVarType {
     yield list() { return null; }
--- a/test/langtools/tools/javac/diags/examples/IncompatibleTypesInSwitchExpression.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/IncompatibleTypesInSwitchExpression.java	Thu Nov 14 13:50:03 2019 +0000
@@ -24,9 +24,6 @@
 // key: compiler.err.prob.found.req
 // key: compiler.misc.incompatible.type.in.switch.expression
 // key: compiler.misc.inconvertible.types
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 
 class IncompatibleTypesInSwitchExpression {
--- a/test/langtools/tools/javac/diags/examples/InvalidYield.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/InvalidYield.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,7 +22,6 @@
  */
 
 // key: compiler.err.invalid.yield
-// options: --enable-preview --source ${jdk.version}
 
 class BreakComplexValueNoSwitchExpressions {
     void t() {
--- a/test/langtools/tools/javac/diags/examples/InvalidYieldWarning.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/InvalidYieldWarning.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,7 +22,7 @@
  */
 
 // key: compiler.warn.invalid.yield
-// options: --source ${jdk.version}
+// options: -Xlint:-options --source 13
 
 class BreakComplexValueNoSwitchExpressions {
     void t() {
--- a/test/langtools/tools/javac/diags/examples/MultipleCaseLabels.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/MultipleCaseLabels.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,8 +22,8 @@
  */
 
 // key: compiler.misc.feature.multiple.case.labels
-// key: compiler.warn.preview.feature.use.plural
-// options: --enable-preview -source ${jdk.version} -Xlint:preview
+// key: compiler.err.feature.not.supported.in.source.plural
+// options: -Xlint:-options -source 13
 
 class MultipleCaseLabels {
     void m(int i) {
--- a/test/langtools/tools/javac/diags/examples/NoSwitchExpression.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/NoSwitchExpression.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,7 +22,6 @@
  */
 
 // key: compiler.err.no.switch.expression
-// options: --enable-preview --source ${jdk.version}
 
 class BreakComplexValueNoSwitchExpressions {
     void t() {
--- a/test/langtools/tools/javac/diags/examples/NoSwitchExpressionQualify.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/NoSwitchExpressionQualify.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,7 +22,6 @@
  */
 
 // key: compiler.err.no.switch.expression.qualify
-// options: --enable-preview --source ${jdk.version}
 
 class BreakComplexValueNoSwitchExpressions {
     void t() {
--- a/test/langtools/tools/javac/diags/examples/NotExhaustive.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/NotExhaustive.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.not.exhaustive
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class NotExhaustive {
     int t(int i) {
--- a/test/langtools/tools/javac/diags/examples/RestrictedTypeNotAllowedPreview.java	Fri Nov 08 14:54:17 2019 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-// key: compiler.warn.restricted.type.not.allowed.preview
-
-class yield { }
--- a/test/langtools/tools/javac/diags/examples/ReturnOutsideSwitchExpression.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/ReturnOutsideSwitchExpression.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.return.outside.switch.expression
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class ReturnOutsideSwitchExpression {
     int t(int i) {
--- a/test/langtools/tools/javac/diags/examples/RuleCompletesNormally.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/RuleCompletesNormally.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.rule.completes.normally
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class RuleCompletesNormally {
     public String convert(int i) {
--- a/test/langtools/tools/javac/diags/examples/SwitchCaseUnexpectedStatement.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchCaseUnexpectedStatement.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.switch.case.unexpected.statement
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class ReturnOutsideSwitchExpression {
     void t(int i) {
--- a/test/langtools/tools/javac/diags/examples/SwitchExpressionCompletesNormally.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchExpressionCompletesNormally.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.switch.expression.completes.normally
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class SwitchExpressionCompletesNormally {
     public String convert(int i) {
--- a/test/langtools/tools/javac/diags/examples/SwitchExpressionEmpty.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchExpressionEmpty.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.switch.expression.empty
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class BreakOutsideSwitchExpression {
     String t(E e) {
--- a/test/langtools/tools/javac/diags/examples/SwitchExpressionNoResultExpressions.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchExpressionNoResultExpressions.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.switch.expression.no.result.expressions
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class SwitchExpressionCompletesNormally {
     public String convert(int i) {
--- a/test/langtools/tools/javac/diags/examples/SwitchExpressionTargetCantBeVoid.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchExpressionTargetCantBeVoid.java	Thu Nov 14 13:50:03 2019 +0000
@@ -24,9 +24,6 @@
 // key: compiler.err.prob.found.req
 // key: compiler.misc.incompatible.ret.type.in.lambda
 // key: compiler.misc.switch.expression.target.cant.be.void
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class SwitchExpressionTargetCantBeVoid {
 
--- a/test/langtools/tools/javac/diags/examples/SwitchExpressions.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchExpressions.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,8 +22,8 @@
  */
 
 // key: compiler.misc.feature.switch.expressions
-// key: compiler.warn.preview.feature.use.plural
-// options: --enable-preview -source ${jdk.version} -Xlint:preview
+// key: compiler.err.feature.not.supported.in.source.plural
+// options: -Xlint:-options -source 13
 
 class SwitchExpressions {
     int m(int i) {
--- a/test/langtools/tools/javac/diags/examples/SwitchMixingCaseTypes.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchMixingCaseTypes.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,9 +22,6 @@
  */
 
 // key: compiler.err.switch.mixing.case.types
-// key: compiler.note.preview.filename
-// key: compiler.note.preview.recompile
-// options: --enable-preview -source ${jdk.version}
 
 class SwitchMixingCaseTypes {
 
--- a/test/langtools/tools/javac/diags/examples/SwitchRules.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/diags/examples/SwitchRules.java	Thu Nov 14 13:50:03 2019 +0000
@@ -22,8 +22,8 @@
  */
 
 // key: compiler.misc.feature.switch.rules
-// key: compiler.warn.preview.feature.use.plural
-// options: --enable-preview -source ${jdk.version} -Xlint:preview
+// key: compiler.err.feature.not.supported.in.source.plural
+// options: -Xlint:-options -source 13
 
 class SwitchExpressions {
     void m(int i) {
--- a/test/langtools/tools/javac/expswitch/ExpSwitchNestingTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/expswitch/ExpSwitchNestingTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -76,9 +76,6 @@
         }
     }
 
-    private static String[] PREVIEW_OPTIONS = {"--enable-preview", "-source",
-                                               Integer.toString(Runtime.version().feature())};
-
     private void program(String... constructs) {
         String s = "class C { static boolean cond = false; static int x = 0; void m() { # } }";
         for (String c : constructs)
@@ -88,7 +85,7 @@
 
     private void assertOK(String... constructs) {
         reset();
-        addCompileOptions(PREVIEW_OPTIONS);
+        addCompileOptions();
         program(constructs);
         try {
             compile();
@@ -101,7 +98,7 @@
 
     private void assertOKWithWarning(String warning, String... constructs) {
         reset();
-        addCompileOptions(PREVIEW_OPTIONS);
+        addCompileOptions();
         program(constructs);
         try {
             compile();
@@ -114,7 +111,7 @@
 
     private void assertFail(String expectedDiag, String... constructs) {
         reset();
-        addCompileOptions(PREVIEW_OPTIONS);
+        addCompileOptions();
         program(constructs);
         try {
             compile();
--- a/test/langtools/tools/javac/lambda/BadSwitchExpressionLambda.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/lambda/BadSwitchExpressionLambda.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Adding switch expressions
- * @compile/fail/ref=BadSwitchExpressionLambda.out -XDrawDiagnostics --enable-preview -source ${jdk.version} BadSwitchExpressionLambda.java
+ * @compile/fail/ref=BadSwitchExpressionLambda.out -XDrawDiagnostics BadSwitchExpressionLambda.java
  */
 
 class BadSwitchExpressionLambda {
--- a/test/langtools/tools/javac/lambda/BadSwitchExpressionLambda.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/lambda/BadSwitchExpressionLambda.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,6 +1,4 @@
 BadSwitchExpressionLambda.java:19:26: compiler.err.prob.found.req: (compiler.misc.incompatible.ret.type.in.lambda: (compiler.misc.switch.expression.target.cant.be.void))
 BadSwitchExpressionLambda.java:21:9: compiler.err.cant.apply.symbol: kindname.method, r, BadSwitchExpressionLambda.SAM, @11, kindname.class, BadSwitchExpressionLambda, (compiler.misc.no.conforming.assignment.exists: (compiler.misc.incompatible.ret.type.in.lambda: (compiler.misc.switch.expression.target.cant.be.void)))
 BadSwitchExpressionLambda.java:22:16: compiler.err.prob.found.req: (compiler.misc.unexpected.ret.val)
-- compiler.note.preview.filename: BadSwitchExpressionLambda.java
-- compiler.note.preview.recompile
 3 errors
--- a/test/langtools/tools/javac/parser/JavacParserTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/parser/JavacParserTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1096,7 +1096,7 @@
         String expectedErrors = "Test.java:1:178: compiler.err.switch.case.unexpected.statement\n";
         StringWriter out = new StringWriter();
         JavacTaskImpl ct = (JavacTaskImpl) tool.getTask(out, fm, null,
-                Arrays.asList("-XDrawDiagnostics", "--enable-preview", "-source", SOURCE_VERSION),
+                Arrays.asList("-XDrawDiagnostics"),
                 null, Arrays.asList(new MyFileObject(code)));
 
         CompilationUnitTree cut = ct.parse().iterator().next();
--- a/test/langtools/tools/javac/processing/model/TestSourceVersion.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/processing/model/TestSourceVersion.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
 
 /*
  * @test
- * @bug 7025809 8028543 6415644 8028544 8029942 8187951 8193291 8196551
+ * @bug 7025809 8028543 6415644 8028544 8029942 8187951 8193291 8196551 8233096
  * @summary Test latest, latestSupported, underscore as keyword, etc.
  * @author  Joseph D. Darcy
  * @modules java.compiler
@@ -31,11 +31,12 @@
  */
 
 import java.util.*;
+import java.util.function.Predicate;
 import javax.lang.model.SourceVersion;
 import static javax.lang.model.SourceVersion.*;
 
 /**
- * Verify latest[Supported] behavior.
+ * Verify behavior of latest[Supported] and other methods.
  */
 public class TestSourceVersion {
     public static void main(String... args) {
@@ -43,6 +44,7 @@
         testVersionVaryingKeywords();
         testRestrictedKeywords();
         testVar();
+        testYield();
     }
 
     private static void testLatestSupported() {
@@ -52,8 +54,10 @@
         SourceVersion latestSupported = SourceVersion.latestSupported();
 
         if (latest == last &&
-            latestSupported == SourceVersion.valueOf("RELEASE_" + Runtime.version().feature()) &&
-            (latest == latestSupported || (latest.ordinal() - latestSupported.ordinal() == 1)) )
+            latestSupported == SourceVersion.valueOf("RELEASE_" +
+                                                     Runtime.version().feature()) &&
+            (latest == latestSupported ||
+             (latest.ordinal() - latestSupported.ordinal() == 1)) )
             return;
         else {
             throw new RuntimeException("Unexpected release value(s) found:\n" +
@@ -73,14 +77,14 @@
             String key = entry.getKey();
             SourceVersion value = entry.getValue();
 
-            check(true, isKeyword(key), "keyword", latest());
-            check(false, isName(key),   "name",    latest());
+            check(true,  key, (String s) -> isKeyword(s), "keyword", latest());
+            check(false, key, (String s) -> isName(s),    "name",    latest());
 
             for(SourceVersion version : SourceVersion.values()) {
                 boolean isKeyword = version.compareTo(value) >= 0;
 
-                check(isKeyword,  isKeyword(key, version), "keyword", version);
-                check(!isKeyword, isName(key, version),    "name",    version);
+                check(isKeyword,  key, (String s) -> isKeyword(s, version), "keyword", version);
+                check(!isKeyword, key, (String s) -> isName(s, version),    "name",    version);
             }
         }
     }
@@ -98,31 +102,47 @@
             Set.of("open", "module", "requires", "transitive", "exports",
                    "opens", "to", "uses", "provides", "with");
 
-        for(String key : restrictedKeywords) {
-            for(SourceVersion version : SourceVersion.values()) {
-                check(false, isKeyword(key, version), "keyword", version);
-                check(true,  isName(key, version),    "name",    version);
+        for (String key : restrictedKeywords) {
+            for (SourceVersion version : SourceVersion.values()) {
+                check(false, key, (String s) -> isKeyword(s, version), "keyword", version);
+                check(true,  key, (String s) -> isName(s, version),    "name",    version);
             }
         }
     }
 
     private static void testVar() {
+        for (SourceVersion version : SourceVersion.values()) {
+            Predicate<String> isKeywordVersion = (String s) -> isKeyword(s, version);
+            Predicate<String> isNameVersion = (String s) -> isName(s, version);
 
-        for(SourceVersion version : SourceVersion.values()) {
-            check(false, isKeyword("var",     version), "keyword", version);
-            check(false, isKeyword("foo.var", version), "keyword", version);
-            check(false, isKeyword("var.foo", version), "keyword", version);
-
-            check(true, isName("var", version),     "name", version);
-            check(true, isName("foo.var", version), "name", version);
-            check(true, isName("var.foo", version), "name", version);
+            for (String name : List.of("var", "foo.var", "var.foo")) {
+                check(false, name, isKeywordVersion, "keyword", version);
+                check(true, name,  isNameVersion, "name", version);
+            }
         }
     }
 
-    private static void check(boolean result, boolean expected,
-                              String message, SourceVersion version) {
+    private static void testYield() {
+        for (SourceVersion version : SourceVersion.values()) {
+            Predicate<String> isKeywordVersion = (String s) -> isKeyword(s, version);
+            Predicate<String> isNameVersion = (String s) -> isName(s, version);
+
+            for (String name : List.of("yield", "foo.yield", "yield.foo")) {
+                check(false, name, isKeywordVersion, "keyword", version);
+                check(true, name,  isNameVersion, "name", version);
+            }
+        }
+    }
+
+    private static void check(boolean expected,
+                              String input,
+                              Predicate<String> predicate,
+                              String message,
+                              SourceVersion version) {
+        boolean result = predicate.test(input);
         if (result != expected) {
-            throw new RuntimeException("Unexpected " + message +  "-ness of _ on " + version);
+            throw new RuntimeException("Unexpected " + message +  "-ness of " + input +
+                                       " on " + version);
         }
     }
 }
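
The reworked check takes the input plus a Predicate<String> instead of a precomputed boolean, so a failure can name the offending string, and the same helper serves isKeyword/isName across all SourceVersion values (the new testYield reuses it unchanged). A trimmed sketch of the pattern, using only javax.lang.model.SourceVersion:

    import java.util.List;
    import java.util.function.Predicate;

    import javax.lang.model.SourceVersion;

    class PredicateCheckSketch {
        static void check(boolean expected, String input,
                          Predicate<String> predicate, String message) {
            // Evaluating here, rather than at the call site, keeps 'input'
            // available for the diagnostic.
            if (predicate.test(input) != expected) {
                throw new RuntimeException(
                    "Unexpected " + message + "-ness of " + input);
            }
        }

        public static void main(String... args) {
            // 'yield' is a restricted identifier, not a keyword, so it remains
            // a valid name in every source version (as the test above asserts).
            for (String name : List.of("yield", "foo.yield", "yield.foo")) {
                check(false, name, s -> SourceVersion.isKeyword(s), "keyword");
                check(true,  name, s -> SourceVersion.isName(s),    "name");
            }
        }
    }
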
--- a/test/langtools/tools/javac/switchexpr/BlockExpression.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/BlockExpression.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
  * @summary Verify rule cases with expression statements and throw statements work.
- * @compile --enable-preview -source ${jdk.version} BlockExpression.java
- * @run main/othervm --enable-preview BlockExpression
+ * @compile BlockExpression.java
+ * @run main BlockExpression
  */
 
 public class BlockExpression {
--- a/test/langtools/tools/javac/switchexpr/BooleanNumericNonNumeric.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/BooleanNumericNonNumeric.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Verify the type of a conditional expression with nested switch expression is computed properly
- * @compile/fail/ref=BooleanNumericNonNumeric.out -XDrawDiagnostics --enable-preview -source ${jdk.version} BooleanNumericNonNumeric.java
+ * @compile/fail/ref=BooleanNumericNonNumeric.out -XDrawDiagnostics BooleanNumericNonNumeric.java
  */
 
 public class BooleanNumericNonNumeric {
--- a/test/langtools/tools/javac/switchexpr/BooleanNumericNonNumeric.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/BooleanNumericNonNumeric.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,3 @@
 BooleanNumericNonNumeric.java:11:20: compiler.err.operator.cant.be.applied.1: +, int, boolean
 BooleanNumericNonNumeric.java:19:15: compiler.err.cant.deref: int
-- compiler.note.preview.filename: BooleanNumericNonNumeric.java
-- compiler.note.preview.recompile
 2 errors
--- a/test/langtools/tools/javac/switchexpr/BreakTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/BreakTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -67,7 +67,7 @@
 
         StringWriter out = new StringWriter();
         JavacTask ct = (JavacTask) tool.getTask(out, null, noErrors,
-            List.of("-XDdev", "--enable-preview", "-source", sourceVersion), null,
+            List.of("-XDdev"), null,
             Arrays.asList(new MyFileObject(CODE)));
         List<String> labels = new ArrayList<>();
         new TreePathScanner<Void, Void>() {
--- a/test/langtools/tools/javac/switchexpr/CRT.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/CRT.java	Thu Nov 14 13:50:03 2019 +0000
@@ -151,9 +151,7 @@
         tb.createDirectories(classes);
         tb.cleanDirectory(classes);
         new JavacTask(tb)
-                .options("-Xjcov",
-                         "--enable-preview",
-                         "-source", SOURCE_VERSION)
+                .options("-Xjcov")
                 .outdir(classes)
                 .sources("public class Test {\n" +
                          code +
--- a/test/langtools/tools/javac/switchexpr/DefiniteAssignment1.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/DefiniteAssignment1.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8214031 8221413
  * @summary Verify that definite assignment when true works (legal code)
- * @compile --enable-preview --source ${jdk.version} DefiniteAssignment1.java
- * @run main/othervm --enable-preview DefiniteAssignment1
+ * @compile DefiniteAssignment1.java
+ * @run main DefiniteAssignment1
  */
 public class DefiniteAssignment1 {
     public static void main(String[] args) {
--- a/test/langtools/tools/javac/switchexpr/DefiniteAssignment2.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/DefiniteAssignment2.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8214031
  * @summary Verify that definite assignment when true works (illegal code)
- * @compile/fail/ref=DefiniteAssignment2.out --enable-preview --source ${jdk.version} -XDrawDiagnostics DefiniteAssignment2.java
+ * @compile/fail/ref=DefiniteAssignment2.out -XDrawDiagnostics DefiniteAssignment2.java
  */
 public class DefiniteAssignment2 {
 
--- a/test/langtools/tools/javac/switchexpr/DefiniteAssignment2.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/DefiniteAssignment2.out	Thu Nov 14 13:50:03 2019 +0000
@@ -5,6 +5,4 @@
 DefiniteAssignment2.java:59:19: compiler.err.var.might.not.have.been.initialized: x
 DefiniteAssignment2.java:69:19: compiler.err.var.might.not.have.been.initialized: x
 DefiniteAssignment2.java:79:20: compiler.err.var.might.already.be.assigned: x
-- compiler.note.preview.filename: DefiniteAssignment2.java
-- compiler.note.preview.recompile
 7 errors
--- a/test/langtools/tools/javac/switchexpr/EmptySwitch.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/EmptySwitch.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * @bug 8206986 8226510
 * @summary Verify that a switch that does not yield a value is rejected.
- * @compile/fail/ref=EmptySwitch.out --enable-preview -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=FLOW EmptySwitch.java
+ * @compile/fail/ref=EmptySwitch.out -XDrawDiagnostics -XDshould-stop.at=FLOW EmptySwitch.java
  */
 
 public class EmptySwitch {
--- a/test/langtools/tools/javac/switchexpr/EmptySwitch.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/EmptySwitch.out	Thu Nov 14 13:50:03 2019 +0000
@@ -3,6 +3,4 @@
 EmptySwitch.java:38:10: compiler.err.switch.expression.no.result.expressions
 EmptySwitch.java:44:9: compiler.err.switch.expression.completes.normally
 EmptySwitch.java:47:26: compiler.err.rule.completes.normally
-- compiler.note.preview.filename: EmptySwitch.java
-- compiler.note.preview.recompile
 5 errors
--- a/test/langtools/tools/javac/switchexpr/ExhaustiveEnumSwitch.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExhaustiveEnumSwitch.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,9 +25,9 @@
  * @test
  * @bug 8206986
 * @summary Verify that a switch expression over an enum can be exhaustive without a default.
- * @compile --enable-preview -source ${jdk.version} ExhaustiveEnumSwitch.java
+ * @compile ExhaustiveEnumSwitch.java
  * @compile ExhaustiveEnumSwitchExtra.java
- * @run main/othervm --enable-preview ExhaustiveEnumSwitch
+ * @run main ExhaustiveEnumSwitch
  */
 
 public class ExhaustiveEnumSwitch {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitch-old.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitch-old.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,4 +1,4 @@
-ExpressionSwitch.java:40:16: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.switch.expressions)
-ExpressionSwitch.java:41:20: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.switch.rules)
-ExpressionSwitch.java:93:31: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.multiple.case.labels)
+ExpressionSwitch.java:40:16: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.switch.expressions), 9, 14
+ExpressionSwitch.java:41:20: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.switch.rules), 9, 14
+ExpressionSwitch.java:93:31: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.multiple.case.labels), 9, 14
 3 errors
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitch.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitch.java	Thu Nov 14 13:50:03 2019 +0000
@@ -3,8 +3,8 @@
  * @bug 8206986 8222169 8224031
  * @summary Check expression switch works.
  * @compile/fail/ref=ExpressionSwitch-old.out -source 9 -Xlint:-options -XDrawDiagnostics ExpressionSwitch.java
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitch.java
- * @run main/othervm --enable-preview ExpressionSwitch
+ * @compile ExpressionSwitch.java
+ * @run main ExpressionSwitch
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchBreaks1.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchBreaks1.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
  * @summary Verify behavior of various kinds of breaks.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchBreaks1.java
- * @run main/othervm --enable-preview ExpressionSwitchBreaks1
+ * @compile ExpressionSwitchBreaks1.java
+ * @run main ExpressionSwitchBreaks1
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchBreaks2.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchBreaks2.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Check behavior for invalid breaks.
- * @compile/fail/ref=ExpressionSwitchBreaks2.out -XDrawDiagnostics --enable-preview -source ${jdk.version} ExpressionSwitchBreaks2.java
+ * @compile/fail/ref=ExpressionSwitchBreaks2.out -XDrawDiagnostics ExpressionSwitchBreaks2.java
  */
 
 public class ExpressionSwitchBreaks2 {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchBreaks2.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchBreaks2.out	Thu Nov 14 13:50:03 2019 +0000
@@ -7,6 +7,4 @@
 ExpressionSwitchBreaks2.java:40:29: compiler.err.cant.resolve.location: kindname.variable, undef, , , (compiler.misc.location: kindname.class, ExpressionSwitchBreaks2, null)
 ExpressionSwitchBreaks2.java:45:22: compiler.err.break.outside.switch.expression
 ExpressionSwitchBreaks2.java:49:22: compiler.err.break.outside.switch.expression
-- compiler.note.preview.filename: ExpressionSwitchBreaks2.java
-- compiler.note.preview.recompile
 9 errors
\ No newline at end of file
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchBugs.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchBugs.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986 8214114 8214529
  * @summary Verify various corner cases with nested switch expressions.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchBugs.java
- * @run main/othervm --enable-preview ExpressionSwitchBugs
+ * @compile ExpressionSwitchBugs.java
+ * @run main ExpressionSwitchBugs
  */
 
 public class ExpressionSwitchBugs {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchBugsInGen.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchBugsInGen.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8214031
  * @summary Verify various corner cases with nested switch expressions.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchBugsInGen.java
- * @run main/othervm --enable-preview ExpressionSwitchBugsInGen
+ * @compile ExpressionSwitchBugsInGen.java
+ * @run main ExpressionSwitchBugsInGen
  */
 
 public class ExpressionSwitchBugsInGen {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchCodeFromJLS.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchCodeFromJLS.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
  * @summary Check switch expressions
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchCodeFromJLS.java
- * @run main/othervm --enable-preview ExpressionSwitchCodeFromJLS
+ * @compile ExpressionSwitchCodeFromJLS.java
+ * @run main ExpressionSwitchCodeFromJLS
  */
 
 public class ExpressionSwitchCodeFromJLS {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchDA.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchDA.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
 * @summary Check definite (un)assignment in switch expressions.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchDA.java
- * @run main/othervm --enable-preview ExpressionSwitchDA
+ * @compile ExpressionSwitchDA.java
+ * @run main ExpressionSwitchDA
  */
 
 public class ExpressionSwitchDA {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchEmbedding.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchEmbedding.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8214031 8214114
  * @summary Verify switch expressions embedded in various statements work properly.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchEmbedding.java
- * @run main/othervm --enable-preview ExpressionSwitchEmbedding
+ * @compile ExpressionSwitchEmbedding.java
+ * @run main ExpressionSwitchEmbedding
  */
 
 public class ExpressionSwitchEmbedding {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchFallThrough.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchFallThrough.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
  * @summary Check fall through in switch expressions.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchFallThrough.java
- * @run main/othervm --enable-preview ExpressionSwitchFallThrough
+ * @compile ExpressionSwitchFallThrough.java
+ * @run main ExpressionSwitchFallThrough
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchFallThrough1.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchFallThrough1.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
  * @summary Check fall through in switch expressions.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchFallThrough1.java
- * @run main/othervm --enable-preview ExpressionSwitchFallThrough1
+ * @compile ExpressionSwitchFallThrough1.java
+ * @run main ExpressionSwitchFallThrough1
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchFlow.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchFlow.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8212982
 * @summary Verify a compile-time error is produced if a switch expression does not provide a value
- * @compile/fail/ref=ExpressionSwitchFlow.out --enable-preview -source ${jdk.version} -XDrawDiagnostics ExpressionSwitchFlow.java
+ * @compile/fail/ref=ExpressionSwitchFlow.out -XDrawDiagnostics ExpressionSwitchFlow.java
  */
 
 public class ExpressionSwitchFlow {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchFlow.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchFlow.out	Thu Nov 14 13:50:03 2019 +0000
@@ -7,6 +7,4 @@
 ExpressionSwitchFlow.java:53:9: compiler.err.switch.expression.completes.normally
 ExpressionSwitchFlow.java:61:9: compiler.err.switch.expression.completes.normally
 ExpressionSwitchFlow.java:69:9: compiler.err.switch.expression.completes.normally
-- compiler.note.preview.filename: ExpressionSwitchFlow.java
-- compiler.note.preview.recompile
 9 errors
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchInExpressionSwitch.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchInExpressionSwitch.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
  * @summary Check switch expressions embedded in switch expressions.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchInExpressionSwitch.java
- * @run main/othervm --enable-preview ExpressionSwitchInExpressionSwitch
+ * @compile ExpressionSwitchInExpressionSwitch.java
+ * @run main ExpressionSwitchInExpressionSwitch
  */
 
 public class ExpressionSwitchInExpressionSwitch {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchInfer.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchInfer.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Check types inferred for switch expressions.
- * @compile/fail/ref=ExpressionSwitchInfer.out -XDrawDiagnostics --enable-preview -source ${jdk.version} ExpressionSwitchInfer.java
+ * @compile/fail/ref=ExpressionSwitchInfer.out -XDrawDiagnostics ExpressionSwitchInfer.java
  */
 
 import java.util.ArrayList;
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchInfer.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchInfer.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,6 +1,4 @@
 ExpressionSwitchInfer.java:17:95: compiler.err.cant.resolve.location.args: kindname.method, substring, , int, (compiler.misc.location: kindname.interface, java.lang.CharSequence, null)
 ExpressionSwitchInfer.java:26:38: compiler.err.cant.resolve.location.args: kindname.method, substring, , int, (compiler.misc.location: kindname.interface, java.lang.CharSequence, null)
 ExpressionSwitchInfer.java:30:23: compiler.err.prob.found.req: (compiler.misc.incompatible.type.in.switch.expression: (compiler.misc.inconvertible.types: int, java.lang.String))
-- compiler.note.preview.filename: ExpressionSwitchInfer.java
-- compiler.note.preview.recompile
 3 errors
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchIntersectionTypes.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchIntersectionTypes.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8206986
  * @summary Verify behavior when an intersection type is inferred for switch expression.
- * @compile --enable-preview -source ${jdk.version} ExpressionSwitchIntersectionTypes.java
- * @run main/othervm --enable-preview ExpressionSwitchIntersectionTypes
+ * @compile ExpressionSwitchIntersectionTypes.java
+ * @run main ExpressionSwitchIntersectionTypes
  */
 
 public class ExpressionSwitchIntersectionTypes<X  extends java.io.Serializable & Runnable> {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchNotExhaustive.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchNotExhaustive.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
 * @summary Verify behavior of non-exhaustive switch expressions.
- * @compile/fail/ref=ExpressionSwitchNotExhaustive.out -XDrawDiagnostics --enable-preview -source ${jdk.version} ExpressionSwitchNotExhaustive.java
+ * @compile/fail/ref=ExpressionSwitchNotExhaustive.out -XDrawDiagnostics ExpressionSwitchNotExhaustive.java
  */
 
 public class ExpressionSwitchNotExhaustive {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchNotExhaustive.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchNotExhaustive.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,6 +1,4 @@
 ExpressionSwitchNotExhaustive.java:10:16: compiler.err.not.exhaustive
 ExpressionSwitchNotExhaustive.java:16:16: compiler.err.not.exhaustive
 ExpressionSwitchNotExhaustive.java:29:23: compiler.err.var.might.not.have.been.initialized: s
-- compiler.note.preview.filename: ExpressionSwitchNotExhaustive.java
-- compiler.note.preview.recompile
 3 errors
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchToString.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchToString.java	Thu Nov 14 13:50:03 2019 +0000
@@ -99,7 +99,7 @@
         String sourceVersion = Integer.toString(Runtime.version().feature());
 
         JavacTask ct = (JavacTask) tool.getTask(null, null, noErrors,
-            List.of("-XDdev", "--enable-preview", "-source", sourceVersion), null,
+            List.of("-XDdev"), null,
             Arrays.asList(new MyFileObject(CODE)));
         String actualCode = ct.parse().iterator().next().toString();
         actualCode = actualCode.replace(System.getProperty("line.separator"), "\n");
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchUnreachable.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchUnreachable.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Verify reachability in switch expressions.
- * @compile/fail/ref=ExpressionSwitchUnreachable.out -XDrawDiagnostics --enable-preview -source ${jdk.version} ExpressionSwitchUnreachable.java
+ * @compile/fail/ref=ExpressionSwitchUnreachable.out -XDrawDiagnostics ExpressionSwitchUnreachable.java
  */
 
 public class ExpressionSwitchUnreachable {
--- a/test/langtools/tools/javac/switchexpr/ExpressionSwitchUnreachable.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ExpressionSwitchUnreachable.out	Thu Nov 14 13:50:03 2019 +0000
@@ -4,6 +4,4 @@
 ExpressionSwitchUnreachable.java:37:17: compiler.err.unreachable.stmt
 ExpressionSwitchUnreachable.java:45:17: compiler.err.unreachable.stmt
 ExpressionSwitchUnreachable.java:52:17: compiler.err.unreachable.stmt
-- compiler.note.preview.filename: ExpressionSwitchUnreachable.java
-- compiler.note.preview.recompile
 6 errors
\ No newline at end of file
--- a/test/langtools/tools/javac/switchexpr/LambdaCapture.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/LambdaCapture.java	Thu Nov 14 13:50:03 2019 +0000
@@ -26,7 +26,7 @@
  * @bug 8220041
  * @summary Verify variable capture works inside switch expressions which are
  *          inside variable declarations
- * @compile --enable-preview -source ${jdk.version} LambdaCapture.java
+ * @compile LambdaCapture.java
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchexpr/ParseIncomplete.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ParseIncomplete.java	Thu Nov 14 13:50:03 2019 +0000
@@ -68,7 +68,7 @@
             StringWriter out = new StringWriter();
             try {
                 JavacTask ct = (JavacTask) tool.getTask(out, null, noErrors,
-                    List.of("-XDdev", "--enable-preview", "-source", sourceVersion), null,
+                    List.of("-XDdev"), null,
                     Arrays.asList(new MyFileObject(code)));
                 ct.parse().iterator().next();
             } catch (Throwable t) {
--- a/test/langtools/tools/javac/switchexpr/ParserRecovery.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ParserRecovery.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Verify the parser handles broken input gracefully.
- * @compile/fail/ref=ParserRecovery.out -XDrawDiagnostics --enable-preview -source ${jdk.version} ParserRecovery.java
+ * @compile/fail/ref=ParserRecovery.out -XDrawDiagnostics ParserRecovery.java
  */
 
 public class ParserRecovery {
--- a/test/langtools/tools/javac/switchexpr/ParserRecovery.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/ParserRecovery.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,3 @@
 ParserRecovery.java:10:39: compiler.err.expected2: :, ->
 ParserRecovery.java:13:31: compiler.err.expected2: :, ->
-- compiler.note.preview.filename: ParserRecovery.java
-- compiler.note.preview.recompile
 2 errors
--- a/test/langtools/tools/javac/switchexpr/SwitchExpressionIsNotAConstant.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/SwitchExpressionIsNotAConstant.java	Thu Nov 14 13:50:03 2019 +0000
@@ -26,8 +26,8 @@
  * @bug 8214113
  * @summary Verify the switch expression's type does not have a constant attached,
  *          and so the switch expression is not elided.
- * @compile --enable-preview --source ${jdk.version} SwitchExpressionIsNotAConstant.java
- * @run main/othervm --enable-preview SwitchExpressionIsNotAConstant
+ * @compile SwitchExpressionIsNotAConstant.java
+ * @run main SwitchExpressionIsNotAConstant
  */
 public class SwitchExpressionIsNotAConstant {
 
--- a/test/langtools/tools/javac/switchexpr/SwitchExpressionScopesIsolated.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/SwitchExpressionScopesIsolated.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Verify that scopes in rule cases are isolated.
- * @compile/fail/ref=SwitchExpressionScopesIsolated.out -XDrawDiagnostics --enable-preview -source ${jdk.version} SwitchExpressionScopesIsolated.java
+ * @compile/fail/ref=SwitchExpressionScopesIsolated.out -XDrawDiagnostics SwitchExpressionScopesIsolated.java
  */
 
 public class SwitchExpressionScopesIsolated {
--- a/test/langtools/tools/javac/switchexpr/SwitchExpressionScopesIsolated.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/SwitchExpressionScopesIsolated.out	Thu Nov 14 13:50:03 2019 +0000
@@ -2,6 +2,4 @@
 SwitchExpressionScopesIsolated.java:13:41: compiler.err.cant.resolve.location: kindname.variable, res, , , (compiler.misc.location: kindname.class, SwitchExpressionScopesIsolated, null)
 SwitchExpressionScopesIsolated.java:14:26: compiler.err.cant.resolve.location: kindname.variable, res, , , (compiler.misc.location: kindname.class, SwitchExpressionScopesIsolated, null)
 SwitchExpressionScopesIsolated.java:14:42: compiler.err.cant.resolve.location: kindname.variable, res, , , (compiler.misc.location: kindname.class, SwitchExpressionScopesIsolated, null)
-- compiler.note.preview.filename: SwitchExpressionScopesIsolated.java
-- compiler.note.preview.recompile
 4 errors
--- a/test/langtools/tools/javac/switchexpr/SwitchExpressionSimpleVisitorTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/SwitchExpressionSimpleVisitorTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -107,7 +107,7 @@
 
         StringWriter out = new StringWriter();
         JavacTask ct = (JavacTask) tool.getTask(out, null, noErrors,
-            List.of("--enable-preview", "-source", Integer.toString(Runtime.version().feature())), null,
+            List.of(), null,
             Arrays.asList(new MyFileObject(code)));
         return ct.parse().iterator().next();
     }
--- a/test/langtools/tools/javac/switchexpr/TryCatch.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/TryCatch.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8214114
  * @summary Verify try-catch inside a switch expression works properly.
- * @compile --enable-preview -source ${jdk.version} TryCatch.java
- * @run main/othervm --enable-preview TryCatch
+ * @compile TryCatch.java
+ * @run main TryCatch
  */
 public class TryCatch {
     public static void main(String[] args) {
--- a/test/langtools/tools/javac/switchexpr/TryCatchFinally.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/TryCatchFinally.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,8 +25,8 @@
  * @test
  * @bug 8220018
  * @summary Verify that try-catch-finally inside a switch expression works properly.
- * @compile --enable-preview -source ${jdk.version} TryCatchFinally.java
- * @run main/othervm --enable-preview TryCatchFinally
+ * @compile TryCatchFinally.java
+ * @run main TryCatchFinally
  */
 public class TryCatchFinally {//TODO: yield <double>
     public static void main(String[] args) {
--- a/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * @bug 8223305 8226522
  * @summary Verify correct warnings w.r.t. yield
- * @compile/ref=WarnWrongYieldTest.out -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=ATTR WarnWrongYieldTest.java
+ * @compile/ref=WarnWrongYieldTest.out -Xlint:-options -source 13 -XDrawDiagnostics -XDshould-stop.at=ATTR WarnWrongYieldTest.java
  */
 
 package t;
--- a/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/WarnWrongYieldTest.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,8 +1,8 @@
-WarnWrongYieldTest.java:39:11: compiler.warn.restricted.type.not.allowed.preview: yield, 13
-WarnWrongYieldTest.java:45:5: compiler.warn.restricted.type.not.allowed.preview: yield, 13
-WarnWrongYieldTest.java:72:15: compiler.warn.restricted.type.not.allowed.preview: yield, 13
-WarnWrongYieldTest.java:75:15: compiler.warn.restricted.type.not.allowed.preview: yield, 13
-WarnWrongYieldTest.java:81:21: compiler.warn.restricted.type.not.allowed.preview: yield, 13
+WarnWrongYieldTest.java:39:11: compiler.warn.restricted.type.not.allowed: yield, 14
+WarnWrongYieldTest.java:45:5: compiler.warn.restricted.type.not.allowed: yield, 14
+WarnWrongYieldTest.java:72:15: compiler.warn.restricted.type.not.allowed: yield, 14
+WarnWrongYieldTest.java:75:15: compiler.warn.restricted.type.not.allowed: yield, 14
+WarnWrongYieldTest.java:81:21: compiler.warn.restricted.type.not.allowed: yield, 14
 WarnWrongYieldTest.java:93:9: compiler.warn.invalid.yield
 WarnWrongYieldTest.java:98:9: compiler.warn.invalid.yield
 WarnWrongYieldTest.java:103:9: compiler.warn.invalid.yield
@@ -11,8 +11,8 @@
 WarnWrongYieldTest.java:118:9: compiler.warn.invalid.yield
 WarnWrongYieldTest.java:123:22: compiler.warn.invalid.yield
 WarnWrongYieldTest.java:152:24: compiler.warn.invalid.yield
-WarnWrongYieldTest.java:164:18: compiler.warn.restricted.type.not.allowed.preview: yield, 13
-WarnWrongYieldTest.java:168:23: compiler.warn.restricted.type.not.allowed.preview: yield, 13
+WarnWrongYieldTest.java:164:18: compiler.warn.restricted.type.not.allowed: yield, 14
+WarnWrongYieldTest.java:168:23: compiler.warn.restricted.type.not.allowed: yield, 14
 WarnWrongYieldTest.java:34:28: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:45:5: compiler.warn.illegal.ref.to.restricted.type: yield
 WarnWrongYieldTest.java:168:23: compiler.warn.illegal.ref.to.restricted.type: yield
--- a/test/langtools/tools/javac/switchexpr/WrongBreakTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/WrongBreakTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * @bug 8223305
 * @summary Ensure javac does not crash on wrong breaks.
- * @compile/fail/ref=WrongBreakTest.out --enable-preview -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=FLOW WrongBreakTest.java
+ * @compile/fail/ref=WrongBreakTest.out -XDrawDiagnostics -XDshould-stop.at=FLOW WrongBreakTest.java
  */
 
 public class WrongBreakTest {
--- a/test/langtools/tools/javac/switchexpr/WrongBreakTest.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/WrongBreakTest.out	Thu Nov 14 13:50:03 2019 +0000
@@ -4,6 +4,4 @@
 WrongBreakTest.java:36:9: compiler.err.ref.ambiguous: test, kindname.method, test(int), WrongBreakTest, kindname.method, test(java.lang.Object), WrongBreakTest
 WrongBreakTest.java:38:13: compiler.err.no.switch.expression
 WrongBreakTest.java:41:13: compiler.err.no.switch.expression
-- compiler.note.preview.filename: WrongBreakTest.java
-- compiler.note.preview.recompile
 6 errors
--- a/test/langtools/tools/javac/switchexpr/WrongYieldTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/WrongYieldTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -25,7 +25,7 @@
  * @test
  * @bug 8223305 8226522
  * @summary Ensure proper errors are returned for yields.
- * @compile/fail/ref=WrongYieldTest.out --enable-preview -source ${jdk.version} -XDrawDiagnostics -XDshould-stop.at=ATTR WrongYieldTest.java
+ * @compile/fail/ref=WrongYieldTest.out -XDrawDiagnostics -XDshould-stop.at=ATTR WrongYieldTest.java
  */
 
 package t;
--- a/test/langtools/tools/javac/switchexpr/WrongYieldTest.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchexpr/WrongYieldTest.out	Thu Nov 14 13:50:03 2019 +0000
@@ -26,6 +26,4 @@
 WrongYieldTest.java:201:9: compiler.err.no.switch.expression
 WrongYieldTest.java:202:9: compiler.err.no.switch.expression
 WrongYieldTest.java:216:24: compiler.err.illegal.ref.to.restricted.type: yield
-- compiler.note.preview.filename: WrongYieldTest.java
-- compiler.note.preview.recompile
 28 errors
--- a/test/langtools/tools/javac/switchextra/CaseTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/CaseTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -137,7 +137,7 @@
 
         StringWriter out = new StringWriter();
         JavacTask ct = (JavacTask) tool.getTask(out, null, noErrors,
-            List.of("-XDdev", "--enable-preview", "-source", sourceVersion), null,
+            List.of("-XDdev"), null,
             Arrays.asList(new MyFileObject(code)));
         return ct.parse().iterator().next();
     }
--- a/test/langtools/tools/javac/switchextra/DefiniteAssignment1.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/DefiniteAssignment1.java	Thu Nov 14 13:50:03 2019 +0000
@@ -24,8 +24,8 @@
 /**
  * @test
  * @summary Verify that definite assignment works (legal code)
- * @compile --enable-preview -source ${jdk.version} DefiniteAssignment1.java
- * @run main/othervm --enable-preview DefiniteAssignment1
+ * @compile DefiniteAssignment1.java
+ * @run main DefiniteAssignment1
  */
 public class DefiniteAssignment1 {
     public static void main(String[] args) {
--- a/test/langtools/tools/javac/switchextra/DefiniteAssignment2.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/DefiniteAssignment2.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,7 +1,7 @@
 /**
  * @test /nodynamiccopyright/
  * @summary Verify that definite assignment works (illegal code)
- * @compile/fail/ref=DefiniteAssignment2.out -XDrawDiagnostics --enable-preview -source ${jdk.version} DefiniteAssignment2.java
+ * @compile/fail/ref=DefiniteAssignment2.out -XDrawDiagnostics DefiniteAssignment2.java
  */
 public class DefiniteAssignment2 {
 
--- a/test/langtools/tools/javac/switchextra/DefiniteAssignment2.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/DefiniteAssignment2.out	Thu Nov 14 13:50:03 2019 +0000
@@ -4,6 +4,4 @@
 DefiniteAssignment2.java:52:28: compiler.err.var.might.not.have.been.initialized: x
 DefiniteAssignment2.java:62:28: compiler.err.var.might.not.have.been.initialized: x
 DefiniteAssignment2.java:73:28: compiler.err.var.might.not.have.been.initialized: x
-- compiler.note.preview.filename: DefiniteAssignment2.java
-- compiler.note.preview.recompile
 6 errors
--- a/test/langtools/tools/javac/switchextra/MultipleLabelsExpression-old.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/MultipleLabelsExpression-old.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,4 +1,4 @@
-MultipleLabelsExpression.java:31:16: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.switch.expressions)
-MultipleLabelsExpression.java:32:20: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.switch.rules)
-MultipleLabelsExpression.java:33:19: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.multiple.case.labels)
+MultipleLabelsExpression.java:31:16: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.switch.expressions), 9, 14
+MultipleLabelsExpression.java:32:20: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.switch.rules), 9, 14
+MultipleLabelsExpression.java:33:19: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.multiple.case.labels), 9, 14
 3 errors
--- a/test/langtools/tools/javac/switchextra/MultipleLabelsExpression.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/MultipleLabelsExpression.java	Thu Nov 14 13:50:03 2019 +0000
@@ -3,8 +3,8 @@
  * @bug 8206986
  * @summary Verify cases with multiple labels work properly.
  * @compile/fail/ref=MultipleLabelsExpression-old.out -source 9 -Xlint:-options -XDrawDiagnostics MultipleLabelsExpression.java
- * @compile --enable-preview -source ${jdk.version} MultipleLabelsExpression.java
- * @run main/othervm --enable-preview MultipleLabelsExpression
+ * @compile MultipleLabelsExpression.java
+ * @run main MultipleLabelsExpression
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchextra/MultipleLabelsStatement-old.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/MultipleLabelsStatement-old.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,2 +1,2 @@
-MultipleLabelsStatement.java:35:21: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.multiple.case.labels)
+MultipleLabelsStatement.java:35:21: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.multiple.case.labels), 9, 14
 1 error
--- a/test/langtools/tools/javac/switchextra/MultipleLabelsStatement.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/MultipleLabelsStatement.java	Thu Nov 14 13:50:03 2019 +0000
@@ -3,8 +3,8 @@
  * @bug 8206986
  * @summary Verify cases with multiple labels work properly.
  * @compile/fail/ref=MultipleLabelsStatement-old.out -source 9 -Xlint:-options -XDrawDiagnostics MultipleLabelsStatement.java
- * @compile --enable-preview -source ${jdk.version} MultipleLabelsStatement.java
- * @run main/othervm --enable-preview MultipleLabelsStatement
+ * @compile MultipleLabelsStatement.java
+ * @run main MultipleLabelsStatement
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchextra/RuleParsingTest.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/RuleParsingTest.java	Thu Nov 14 13:50:03 2019 +0000
@@ -95,7 +95,7 @@
 
         StringWriter out = new StringWriter();
         JavacTask ct = (JavacTask) tool.getTask(out, null, noErrors,
-            List.of("--enable-preview", "-source", sourceVersion), null,
+            List.of(), null,
             Arrays.asList(new MyFileObject(code.toString())));
         CompilationUnitTree cut = ct.parse().iterator().next();
         Trees trees = Trees.instance(ct);
--- a/test/langtools/tools/javac/switchextra/SwitchArrowBrokenConstant.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchArrowBrokenConstant.java	Thu Nov 14 13:50:03 2019 +0000
@@ -3,7 +3,7 @@
  * @bug 8206986
  * @summary Verify reasonable errors are produced when neither ':' nor '->'
 *          is found after the expression of a case
- * @compile/fail/ref=SwitchArrowBrokenConstant.out -source ${jdk.version} --enable-preview -Xlint:-preview -XDrawDiagnostics SwitchArrowBrokenConstant.java
+ * @compile/fail/ref=SwitchArrowBrokenConstant.out -Xlint:-preview -XDrawDiagnostics SwitchArrowBrokenConstant.java
  */
 
 public class SwitchArrowBrokenConstant {
--- a/test/langtools/tools/javac/switchextra/SwitchArrowBrokenConstant.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchArrowBrokenConstant.out	Thu Nov 14 13:50:03 2019 +0000
@@ -6,6 +6,4 @@
 SwitchArrowBrokenConstant.java:22:19: compiler.err.expected2: :, ->
 SwitchArrowBrokenConstant.java:25:20: compiler.err.expected2: :, ->
 SwitchArrowBrokenConstant.java:28:20: compiler.err.expected2: :, ->
-- compiler.note.preview.filename: SwitchArrowBrokenConstant.java
-- compiler.note.preview.recompile
 8 errors
--- a/test/langtools/tools/javac/switchextra/SwitchStatementArrow-old.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementArrow-old.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,3 +1,3 @@
-SwitchStatementArrow.java:41:20: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.switch.rules)
-SwitchStatementArrow.java:42:21: compiler.err.preview.feature.disabled.plural: (compiler.misc.feature.multiple.case.labels)
+SwitchStatementArrow.java:41:20: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.switch.rules), 9, 14
+SwitchStatementArrow.java:42:21: compiler.err.feature.not.supported.in.source.plural: (compiler.misc.feature.multiple.case.labels), 9, 14
 2 errors
--- a/test/langtools/tools/javac/switchextra/SwitchStatementArrow.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementArrow.java	Thu Nov 14 13:50:03 2019 +0000
@@ -3,8 +3,8 @@
  * @bug 8206986
  * @summary Verify rule cases work properly.
  * @compile/fail/ref=SwitchStatementArrow-old.out -source 9 -Xlint:-options -XDrawDiagnostics SwitchStatementArrow.java
- * @compile --enable-preview -source ${jdk.version} SwitchStatementArrow.java
- * @run main/othervm --enable-preview SwitchStatementArrow
+ * @compile SwitchStatementArrow.java
+ * @run main SwitchStatementArrow
  */
 
 import java.util.Objects;
--- a/test/langtools/tools/javac/switchextra/SwitchStatementBroken.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementBroken.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Verify that rule and ordinary cases cannot be mixed.
- * @compile/fail/ref=SwitchStatementBroken.out -XDrawDiagnostics --enable-preview -source ${jdk.version} SwitchStatementBroken.java
+ * @compile/fail/ref=SwitchStatementBroken.out -XDrawDiagnostics SwitchStatementBroken.java
  */
 
 public class SwitchStatementBroken {
--- a/test/langtools/tools/javac/switchextra/SwitchStatementBroken.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementBroken.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,4 +1,2 @@
 SwitchStatementBroken.java:15:13: compiler.err.switch.mixing.case.types
-- compiler.note.preview.filename: SwitchStatementBroken.java
-- compiler.note.preview.recompile
 1 error
--- a/test/langtools/tools/javac/switchextra/SwitchStatementBroken2.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementBroken2.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
 * @summary Verify that disallowed kinds of statements cannot be used in rule cases.
- * @compile/fail/ref=SwitchStatementBroken2.out -XDrawDiagnostics --enable-preview -source ${jdk.version} SwitchStatementBroken2.java
+ * @compile/fail/ref=SwitchStatementBroken2.out -XDrawDiagnostics SwitchStatementBroken2.java
  */
 
 public class SwitchStatementBroken2 {
--- a/test/langtools/tools/javac/switchextra/SwitchStatementBroken2.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementBroken2.out	Thu Nov 14 13:50:03 2019 +0000
@@ -3,6 +3,4 @@
 SwitchStatementBroken2.java:19:23: compiler.err.switch.case.unexpected.statement
 SwitchStatementBroken2.java:22:27: compiler.err.variable.not.allowed
 SwitchStatementBroken2.java:23:24: compiler.err.switch.case.unexpected.statement
-- compiler.note.preview.filename: SwitchStatementBroken2.java
-- compiler.note.preview.recompile
 5 errors
--- a/test/langtools/tools/javac/switchextra/SwitchStatementScopesIsolated.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementScopesIsolated.java	Thu Nov 14 13:50:03 2019 +0000
@@ -2,7 +2,7 @@
  * @test /nodynamiccopyright/
  * @bug 8206986
  * @summary Verify that scopes in rule cases are isolated.
- * @compile/fail/ref=SwitchStatementScopesIsolated.out -XDrawDiagnostics --enable-preview -source ${jdk.version} SwitchStatementScopesIsolated.java
+ * @compile/fail/ref=SwitchStatementScopesIsolated.out -XDrawDiagnostics SwitchStatementScopesIsolated.java
  */
 
 public class SwitchStatementScopesIsolated {
--- a/test/langtools/tools/javac/switchextra/SwitchStatementScopesIsolated.out	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/javac/switchextra/SwitchStatementScopesIsolated.out	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,3 @@
 SwitchStatementScopesIsolated.java:13:25: compiler.err.cant.resolve.location: kindname.variable, res, , , (compiler.misc.location: kindname.class, SwitchStatementScopesIsolated, null)
 SwitchStatementScopesIsolated.java:14:26: compiler.err.cant.resolve.location: kindname.variable, res, , , (compiler.misc.location: kindname.class, SwitchStatementScopesIsolated, null)
-- compiler.note.preview.filename: SwitchStatementScopesIsolated.java
-- compiler.note.preview.recompile
 2 errors
--- a/test/langtools/tools/jdeps/listdeps/ListModuleDeps.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/langtools/tools/jdeps/listdeps/ListModuleDeps.java	Thu Nov 14 13:50:03 2019 +0000
@@ -92,7 +92,6 @@
     public Object[][] jdkModules() {
         return new Object[][]{
             {"jdk.compiler", new String[]{
-                                "java.base/jdk.internal",
                                 "java.base/jdk.internal.jmod",
                                 "java.base/jdk.internal.misc",
                                 "java.base/sun.reflect.annotation",
--- a/test/lib/jdk/test/lib/Utils.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/lib/jdk/test/lib/Utils.java	Thu Nov 14 13:50:03 2019 +0000
@@ -198,8 +198,7 @@
     * @return A copy of the given opts with all GC options removed.
      */
     private static final Pattern useGcPattern = Pattern.compile(
-            "(?:\\-XX\\:[\\+\\-]Use.+GC)"
-            + "|(?:\\-Xconcgc)");
+            "(?:\\-XX\\:[\\+\\-]Use.+GC)");
     public static List<String> removeGcOpts(List<String> opts) {
         List<String> optsWithoutGC = new ArrayList<String>();
         for (String opt : opts) {
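
With CMS removed, the legacy -Xconcgc alias no longer exists, so
useGcPattern only needs to match the -XX:[+-]Use...GC family. A small
self-contained sketch of the updated pattern's behavior; the class name and
the matches() call are illustrative assumptions, not code from Utils.java:

    import java.util.List;
    import java.util.regex.Pattern;

    public class RemoveGcOptsSketch {
        // The same regular expression as the updated useGcPattern above.
        private static final Pattern USE_GC =
                Pattern.compile("(?:\\-XX\\:[\\+\\-]Use.+GC)");

        public static void main(String[] args) {
            for (String opt : List.of("-XX:+UseG1GC", "-XX:-UseParallelGC",
                                      "-Xconcgc", "-Xmx1g")) {
                boolean stripped = USE_GC.matcher(opt).matches();
                System.out.println(opt + " -> " + (stripped ? "removed" : "kept"));
            }
            // -XX:+UseG1GC and -XX:-UseParallelGC are removed;
            // -Xconcgc and -Xmx1g are kept.
        }
    }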
--- a/test/lib/jdk/test/lib/jfr/GCHelper.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/lib/jdk/test/lib/jfr/GCHelper.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,12 +68,10 @@
     public static final String event_phases_level_3 = EventNames.GCPhasePauseLevel3;
 
     public static final String gcG1New = "G1New";
-    public static final String gcParNew = "ParNew";
     public static final String gcDefNew = "DefNew";
     public static final String gcParallelScavenge = "ParallelScavenge";
     public static final String gcG1Old = "G1Old";
     public static final String gcG1Full = "G1Full";
-    public static final String gcConcurrentMarkSweep = "ConcurrentMarkSweep";
     public static final String gcSerialOld = "SerialOld";
     public static final String gcPSMarkSweep = "PSMarkSweep";
     public static final String gcParallelOld = "ParallelOld";
@@ -174,26 +172,21 @@
         beanCollectorTypes.put("G1 Young Generation", true);
         beanCollectorTypes.put("Copy", true);
         beanCollectorTypes.put("PS Scavenge", true);
-        beanCollectorTypes.put("ParNew", true);
 
         // old GarbageCollectionMXBeans.
         beanCollectorTypes.put("G1 Old Generation", false);
-        beanCollectorTypes.put("ConcurrentMarkSweep", false);
         beanCollectorTypes.put("PS MarkSweep", false);
         beanCollectorTypes.put("MarkSweepCompact", false);
 
         // List of expected collector overrides. "A.B" means that collector A may use collector B.
         collectorOverrides.add("G1Old.G1Full");
-        collectorOverrides.add("ConcurrentMarkSweep.SerialOld");
         collectorOverrides.add("SerialOld.PSMarkSweep");
 
         requiredEvents.put(gcG1New, new String[] {event_heap_summary, event_young_garbage_collection});
-        requiredEvents.put(gcParNew, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_young_garbage_collection});
         requiredEvents.put(gcDefNew, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_young_garbage_collection});
         requiredEvents.put(gcParallelScavenge, new String[] {event_heap_summary, event_heap_ps_summary, event_heap_metaspace_summary, event_reference_statistics, event_phases_pause, event_phases_level_1, event_young_garbage_collection});
         requiredEvents.put(gcG1Old, new String[] {event_heap_summary, event_old_garbage_collection});
         requiredEvents.put(gcG1Full, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_old_garbage_collection});
-        requiredEvents.put(gcConcurrentMarkSweep, new String[] {event_phases_pause, event_phases_level_1, event_old_garbage_collection});
         requiredEvents.put(gcSerialOld, new String[] {event_heap_summary, event_heap_metaspace_summary, event_phases_pause, event_phases_level_1, event_old_garbage_collection});
         requiredEvents.put(gcParallelOld, new String[] {event_heap_summary, event_heap_ps_summary, event_heap_metaspace_summary, event_reference_statistics, event_phases_pause, event_phases_level_1, event_old_garbage_collection, event_parold_garbage_collection});
 
--- a/test/lib/sun/hotspot/WhiteBox.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/lib/sun/hotspot/WhiteBox.java	Thu Nov 14 13:50:03 2019 +0000
@@ -193,6 +193,9 @@
     return parseCommandLine0(commandline, delim, args);
   }
 
+  public native int g1ActiveMemoryNodeCount();
+  public native int[] g1MemoryNodeIds();
+
   // Parallel GC
   public native long psVirtualSpaceAlignment();
   public native long psHeapGenerationAlignment();
@@ -384,7 +387,6 @@
   public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
   public native long incMetaspaceCapacityUntilGC(long increment);
   public native long metaspaceCapacityUntilGC();
-  public native boolean metaspaceShouldConcurrentCollect();
   public native long metaspaceReserveAlignment();
 
   // Don't use these methods directly
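
The two new natives added above expose G1 NUMA information to tests. A
hedged sketch of how a test might query them; it assumes the usual WhiteBox
setup (-Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI)
and a JVM running G1, and the class name is illustrative:

    import sun.hotspot.WhiteBox;

    public class G1NumaProbe {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();
            // Number of memory nodes (NUMA nodes) G1 is actively using.
            System.out.println("active nodes: " + wb.g1ActiveMemoryNodeCount());
            // The ids of those nodes, as reported by the VM.
            for (int id : wb.g1MemoryNodeIds()) {
                System.out.println("node id: " + id);
            }
        }
    }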
--- a/test/lib/sun/hotspot/gc/GC.java	Fri Nov 08 14:54:17 2019 +0000
+++ b/test/lib/sun/hotspot/gc/GC.java	Thu Nov 14 13:50:03 2019 +0000
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,11 +35,10 @@
      */
     Serial(1),
     Parallel(2),
-    ConcMarkSweep(3),
-    G1(4),
-    Epsilon(5),
-    Z(6),
-    Shenandoah(7);
+    G1(3),
+    Epsilon(4),
+    Z(5),
+    Shenandoah(6);
 
     private static final WhiteBox WB = WhiteBox.getWhiteBox();
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/micro/org/openjdk/bench/vm/compiler/BitSetAndReset.java	Thu Nov 14 13:50:03 2019 +0000
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.openjdk.bench.vm.compiler;
+
+import org.openjdk.jmh.annotations.*;
+import org.openjdk.jmh.infra.*;
+
+import java.util.concurrent.TimeUnit;
+
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+@State(Scope.Thread)
+public class BitSetAndReset {
+    private static final int COUNT = 10_000;
+
+    private static final long MASK63 = 0x8000_0000_0000_0000L;
+    private static final long MASK31 = 0x0000_0000_8000_0000L;
+    private static final long MASK15 = 0x0000_0000_0000_8000L;
+    private static final long MASK00 = 0x0000_0000_0000_0001L;
+
+    private long andq, orq;
+    private boolean success = true;
+
+    @TearDown(Level.Iteration)
+    public void finish() {
+        if (!success)
+            throw new AssertionError("Failure while setting or clearing long vector bits!");
+    }
+
+    @Benchmark
+    public void bitSet(Blackhole bh) {
+        for (int i=0; i<COUNT; i++) {
+            andq = MASK63 | MASK31 | MASK15 | MASK00;
+            orq = 0;
+            bh.consume(test63());
+            bh.consume(test31());
+            bh.consume(test15());
+            bh.consume(test00());
+            success &= andq == 0 && orq == (MASK63 | MASK31 | MASK15 | MASK00);
+        }
+    }
+
+    private long test63() {
+        andq &= ~MASK63;
+        orq |= MASK63;
+        return 0L;
+    }
+    private long test31() {
+        andq &= ~MASK31;
+        orq |= MASK31;
+        return 0L;
+    }
+    private long test15() {
+        andq &= ~MASK15;
+        orq |= MASK15;
+        return 0L;
+    }
+    private long test00() {
+        andq &= ~MASK00;
+        orq |= MASK00;
+        return 0L;
+    }
+
+    private static final long MASK62 = 0x4000_0000_0000_0000L;
+    private static final long MASK61 = 0x2000_0000_0000_0000L;
+    private static final long MASK60 = 0x1000_0000_0000_0000L;
+
+    private long orq63, orq62, orq61, orq60;
+
+    @Benchmark
+    public void throughput(Blackhole bh) {
+        for (int i=0; i<COUNT; i++) {
+            orq63 = orq62 = orq61 = orq60 = 0;
+            bh.consume(testTp());
+        }
+    }
+
+    private long testTp() {
+        orq63 |= MASK63;
+        orq62 |= MASK62;
+        orq61 |= MASK61;
+        orq60 |= MASK60;
+        return 0L;
+    }
+}
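
The new microbenchmark repeatedly sets and clears single bits in long
fields, a pattern the JIT may compile down to single bit-manipulation
instructions on x86. Assuming the standard JDK micro harness, it could be
run with something like:

    make test TEST="micro:vm.compiler.BitSetAndReset"

The exact selector syntax depends on the local build setup; see
doc/testing.md in the JDK tree.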